--- /dev/null
+Except where indicated otherwise, this software is:
+Copyright (c) 2006 Scott Wood <scott@buserror.net>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * The names of the Software's authors and/or contributors
+ may not be used to endorse or promote products derived from
+ this Software without specific prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
--- /dev/null
+# Top-level makefile: build each component in $(SUBDIRS), in order.
+SUBDIRS = idlcomp idl include lib kernel
+
+.PHONY : all clean distclean dep depend
+
+# Build every subdirectory in order.  "|| exit 1" (rather than
+# "break") makes the shell loop -- and therefore make itself --
+# fail when a sub-make fails; "break" left the loop with exit
+# status 0, so build errors went unreported to the caller.
+all :
+	for i in $(SUBDIRS); do $(MAKE) -C $$i || exit 1; done
+
+# Allow "make <subdir>" to build a single component.
+.PHONY: $(SUBDIRS)
+$(SUBDIRS):
+	$(MAKE) -C $@
+
+clean :
+	for i in $(SUBDIRS); do $(MAKE) -C $$i clean || exit 1; done
+
+distclean:
+	for i in $(SUBDIRS); do $(MAKE) -C $$i distclean || exit 1; done
+
+# Dependency generation is handled automatically within each component.
+dep :
+	@echo Top-level "make dep" not supported\; dependencies will be done automatically.
+
+depend : dep
+
+ifeq (.depend,$(wildcard .depend))
+include .depend
+endif
--- /dev/null
+# Makefile.head -- the initial portion of a generic makefile
+# framework to be included at the top directory of each component.
+#
+# Before including this file, the TOP variable should be set to the
+# absolute path of the top-level project directory, and the COMP
+# variable should be set to the path to the component's top-level
+# directory, relative to the project's top-level directory. After
+# including this file, include the directory makefiles, and then
+# include Makefile.tail. Before including Makefile.tail, TARGETS
+# should be set to the default targets to be made when "make" is
+# invoked with no arguments. The target rules themselves can be
+# declared after Makefile.tail in order to use variables such as OBJS
+# which are set in Makefile.tail.
+#
+# WARNING: These makefiles will not work if you have spaces in any
+# pathnames, including any parent directory outside the project
+# hierarchy. This is a limitation of GNU make (quoting things
+# doesn't help). Sorry.
+#
+# The makefiles also probably won't work very well if the component
+# directory is a symlink, and the top-level directory is the symbolic
+# parent (i.e. dirname `pwd -L`) rather than the physical parent
+# (i.e. dirname `pwd -P`).
+#
+# NOTE: due to suckage of make that I haven't figured out how to work
+# around, you'll have to run make twice after you update a .cdl file
+# in order to make sure that any code that includes the generated
+# headers has been rebuilt. This is despite having an explicit
+# dependency of the .cc file on the normal .h, of the normal .h on
+# the generated .h, of the generated headers on the server directory,
+# and of the server directory on the CDL file. The CDL->directory
+# rule is used to generate the headers, but even if it's done
+# specifically for the .cc file up the chain, it doesn't rebuild the
+# .cc file.
+
+# Per-target configuration (toolchain names, BUILDDIR, etc.);
+# included only if the file actually exists.
+TARGET := $(TOP)/Makefile.target
+
+ifeq ($(TARGET),$(wildcard $(TARGET)))
+include $(TARGET)
+endif
+
+# Header search paths shared by all components.
+CXXINCS += -I$(TOP)/include/c++ -I$(GENINCLUDES)/c++ \
+           -I$(TOP)/include/c -I$(TOP)/include/c/std \
+           -I$(BUILDDIR)/include
+
+WARN += -Wall -Werror
+OPT += -O2
+DEBUG += -g3
+
+# Flags for the cross (target) compiler and for the build-machine
+# compiler, respectively.
+CXXFLAGS += $(CXXINCS) $(DEFS) $(CXXWARN) $(OPT) $(DEBUG)
+BUILDCXXFLAGS += $(BUILDDEFS) $(CXXWARN) $(OPT) $(DEBUG)
+
+.PHONY: all default rerun dep servers clean distclean
+.SUFFIXES:
+
+all: default
+
+# Make sure "make" treats these as the right type of variable
+# (simply-expanded) when += is used later.
+DIRS :=
+
+CFILES :=
+CXXFILES :=
+ASFILES :=
+
+GENCFILES :=
+GENCXXFILES :=
+GENASFILES :=
+
+BUILDCFILES :=
+BUILDCXXFILES :=
+
+BUILDGENCFILES :=
+BUILDGENCXXFILES :=
--- /dev/null
+# Makefile.tail -- see Makefile.head for usage information
+
+# Source lists derived from the bare (extensionless) file names that
+# the directory makefiles appended to *FILES.
+ASSRCS := $(ASFILES:%=%.S)
+CSRCS := $(CFILES:%=%.c)
+CXXSRCS := $(CXXFILES:%=%.cc)
+
+# Object files always live in a tree under $(BUILDDIR) that parallels
+# the source tree.  GEN* entries are generated sources, whose sources
+# themselves also live under $(BUILDDIR).
+ASOBJS := $(ASFILES:%=$(BUILDDIR)/%.o)
+GENASOBJS := $(GENASFILES:%=$(BUILDDIR)/%.o)
+COBJS := $(CFILES:%=$(BUILDDIR)/%.o)
+GENCOBJS := $(GENCFILES:%=$(BUILDDIR)/%.o)
+CXXOBJS := $(CXXFILES:%=$(BUILDDIR)/%.o)
+GENCXXOBJS := $(GENCXXFILES:%=$(BUILDDIR)/%.o)
+
+# ASOBJS must come first, so that the kernel entry code can be
+# at the beginning of the output image.
+
+FILES := $(ASFILES) $(CFILES) $(CXXFILES)
+OBJS := $(ASOBJS) $(COBJS) $(CXXOBJS) $(GENASOBJS) $(GENCOBJS) $(GENCXXOBJS)
+GENSRCS := $(GENASFILES:%=$(BUILDDIR)/%.S) $(GENCFILES:%=$(BUILDDIR)/%.c) \
+           $(GENCXXFILES:%=$(BUILDDIR)/%.cc)
+SRCS := $(ASFILES:%=%.S) $(CFILES:%=%.c) $(CXXFILES:%=%.cc)
+
+# BUILD* objects are compiled with the build machine's compiler
+# ($(BUILDCC)/$(BUILDCXX)) rather than the cross toolchain; see the
+# corresponding rules below.
+BUILDCOBJS := $(BUILDCFILES:%=$(BUILDDIR)/%.o)
+BUILDGENCOBJS := $(BUILDGENCFILES:%=$(BUILDDIR)/%.o)
+BUILDCXXOBJS := $(BUILDCXXFILES:%=$(BUILDDIR)/%.o)
+BUILDGENCXXOBJS := $(BUILDGENCXXFILES:%=$(BUILDDIR)/%.o)
+
+BUILDOBJS := $(BUILDCOBJS) $(BUILDCXXOBJS) $(BUILDGENCOBJS) $(BUILDGENCXXOBJS)
+BUILDGENSRCS := $(BUILDGENCFILES:%=$(BUILDDIR)/%.c) $(BUILDGENCXXFILES:%=$(BUILDDIR)/%.cc)
+BUILDSRCS := $(BUILDCFILES:%=%.c) $(BUILDCXXFILES:%=%.cc)
+
+# Compilation rules (static pattern rules, one per source class).
+# Recipes are silenced with "@"; a short "$(COMP): file" line is
+# echoed instead.  Each recipe creates the object's output directory
+# first, since objects go into a parallel tree under $(BUILDDIR).
+
+$(ASOBJS): $(BUILDDIR)/%.o: %.S
+	@echo $(COMP): "$<"
+	@$(MKDIR) $(dir $@)
+	@$(CC) $(ASFLAGS) -c -o "$@" "$<"
+
+$(COBJS): $(BUILDDIR)/%.o: %.c
+	@echo $(COMP): "$<"
+	@$(MKDIR) $(dir $@)
+	@$(CC) $(CFLAGS) -c -o "$@" "$<"
+
+$(CXXOBJS): $(BUILDDIR)/%.o: %.cc
+	@echo $(COMP): "$<"
+	@$(MKDIR) $(dir $@)
+	@$(CXX) $(CXXFLAGS) -c -o "$@" "$<"
+
+# Generated sources already live under $(BUILDDIR), so the object
+# path is the source path with only the extension changed.
+$(GENASOBJS): %.o: %.S
+	@echo $(COMP): "$<"
+	@$(MKDIR) $(dir $@)
+	@$(CC) $(ASFLAGS) -c -o "$@" "$<"
+
+$(GENCOBJS): %.o: %.c
+	@echo $(COMP): "$<"
+	@$(MKDIR) $(dir $@)
+	@$(CC) $(CFLAGS) -c -o "$@" "$<"
+
+$(GENCXXOBJS): %.o: %.cc
+	@echo $(COMP): "$<"
+	@$(MKDIR) $(dir $@)
+	@$(CXX) $(CXXFLAGS) -c -o "$@" "$<"
+
+# BUILD* objects use the build machine's compiler, not the cross
+# toolchain, so the results can be run on the host.
+$(BUILDCOBJS): $(BUILDDIR)/%.o: %.c
+	@echo $(COMP): "$<"
+	@$(MKDIR) $(dir $@)
+	@$(BUILDCC) $(BUILDCFLAGS) -c -o "$@" "$<"
+
+$(BUILDCXXOBJS): $(BUILDDIR)/%.o: %.cc
+	@echo $(COMP): "$<"
+	@$(MKDIR) $(dir $@)
+	@$(BUILDCXX) $(BUILDCXXFLAGS) -c -o "$@" "$<"
+
+$(BUILDGENCOBJS): %.o: %.c
+	@echo $(COMP): "$<"
+	@$(MKDIR) $(dir $@)
+	@$(BUILDCC) $(BUILDCFLAGS) -c -o "$@" "$<"
+
+$(BUILDGENCXXOBJS): %.o: %.cc
+	@echo $(COMP): "$<"
+	@$(MKDIR) $(dir $@)
+	@$(BUILDCXX) $(BUILDCXXFLAGS) -c -o "$@" "$<"
+
+# Remove objects for each registered directory, plus targets and
+# generated sources.  NOTE(review): the object glob is
+# $(BUILDDIR)/$(dir)*.o with no separator added, which assumes each
+# entry of $(DIRS) is empty or ends with a slash -- confirm against
+# the directory makefiles that populate $(DIRS).
+clean: $(EXTRACLEAN)
+	$(RM) $(foreach dir,$(DIRS),$(BUILDDIR)/$(dir)*.o)
+	$(RMDIR) $(BUILDDIR)/include/servers
+	$(RMDIR) $(TARGETS) $(GENSRCS) $(BUILDGENSRCS)
+
+# distclean additionally removes the entire build tree for this
+# component (including the .depend file).
+distclean: clean $(EXTRADISTCLEAN)
+	$(RM) .gdb_history
+	$(RMDIR) $(BUILDDIR)
+
+# Every .o file which has a corresponding .cdl file will
+# depend on the server stubs.
+
+# SERVERS holds the basenames of sources for which a .cdl file
+# exists next to the source file.
+SERVERS := $(wildcard $(FILES:%=%.cdl))
+SERVERS := $(SERVERS:%.cdl=%)
+
+# Run the IDL compiler to (re)generate a server's stub directory.
+# The output directory is removed up front, and again if idlc fails,
+# so a partial stub tree can never look up to date.
+$(SERVERS:%=$(SERVERDIR)/%): $(SERVERDIR)/%: %.cdl $(IFACES)
+	@echo $(COMP): "$<"
+	@$(RMDIR) "$@"
+	@if ! $(IDLC) -t $(ARCH) -l c++ -r -i "$(IFACES)" -o "$@" "$<"; then \
+		$(RMDIR) "$@"; \
+		false; \
+	fi
+
+.DELETE_ON_ERROR: $(SERVERS:%=$(SERVERDIR)/%)
+
+# A server's .cc file cannot be built until its stub directory exists.
+$(SERVERS:%=%.cc): %.cc: $(SERVERDIR)/%
+servers: $(SERVERS:%=$(SERVERDIR)/%)
+
+ifneq ($(NODEPS),y)
+# Regenerate $(BUILDDIR)/.depend by running the compiler's -M mode
+# over every source file.  The sed expression strips the extension
+# (everything from the final dot) to form the .o target name passed
+# to -MT.  Server stubs must exist first so their headers resolve.
+dep: servers $(PREDEP)
+	@echo $(COMP): Generating dependencies
+	@$(RM) "$(BUILDDIR)"/.depend
+	@$(MKDIR) "$(BUILDDIR)"
+	@$(TOUCH) "$(BUILDDIR)"/.depend
+	@for i in $(SRCS); do if [ -f "$$i" ]; then \
+		OBJ=`echo "$$i" | sed s/\\.\[^.\]*$$//`.o; \
+		$(CXX) $(CXXFLAGS) -DMAKEDEP -M -MT "$(BUILDDIR)/$$OBJ" "$$i" >> "$(BUILDDIR)"/.depend; \
+	fi; done
+	@for i in $(GENSRCS); do if [ -f "$$i" ]; then \
+		OBJ=`echo "$$i" | sed s/\\.\[^.\]*$$//`.o; \
+		$(CXX) $(CXXFLAGS) -DMAKEDEP -M -MT "$$OBJ" "$$i" >> "$(BUILDDIR)"/.depend; \
+	fi; done
+	@for i in $(BUILDSRCS); do if [ -f "$$i" ]; then \
+		OBJ=`echo "$$i" | sed s/\\.\[^.\]*$$//`.o; \
+		$(BUILDCXX) $(BUILDCXXFLAGS) -DMAKEDEP -M -MT "$(BUILDDIR)/$$OBJ" "$$i" >> "$(BUILDDIR)"/.depend; \
+	fi; done
+	@for i in $(BUILDGENSRCS); do if [ -f "$$i" ]; then \
+		OBJ=`echo "$$i" | sed s/\\.\[^.\]*$$//`.o; \
+		$(BUILDCXX) $(BUILDCXXFLAGS) -DMAKEDEP -M -MT "$$OBJ" "$$i" >> "$(BUILDDIR)"/.depend; \
+	fi; done
+	@for i in $(SERVERS); do \
+		$(IDLC) -M -l c++ -r -i "$(IFACES)" -o "$(SERVERDIR)/$$i" \
+			"$$i.cdl" >> "$(BUILDDIR)"/.depend; \
+	done
+
+depend: dep
+
+ifeq ($(BUILDDIR)/.depend,$(wildcard $(BUILDDIR)/.depend))
+include $(BUILDDIR)/.depend
+default: $(TARGETS)
+else
+# No dependency file yet: generate it, then re-invoke make so the
+# freshly written dependencies take effect for the real build.
+rerun: dep
+	@$(MAKE)
+
+default: rerun
+endif
+
+else
+# NODEPS=y: skip dependency generation entirely.
+dep:
+depend:
+
+default: $(TARGETS)
+endif
--- /dev/null
+# Makefile.target -- target/toolchain configuration shared by all
+# components.  Guarded so repeated inclusion is harmless.
+ifndef TARGETDEFS
+TARGETDEFS := done
+
+ifndef ARCH
+$(error Please define $$(ARCH).)
+endif
+
+VALIDARCH := no
+
+# Map each supported architecture to its cross-toolchain prefix and
+# bitfield endianness define.
+ifeq ($(ARCH),x86)
+CROSS := i686-polintos-
+DEFS += -DBITFIELD_LE
+VALIDARCH := yes
+endif
+
+ifeq ($(ARCH),x64)
+CROSS := x86_64-polintos-
+DEFS += -DBITFIELD_LE
+VALIDARCH := yes
+endif
+
+ifeq ($(VALIDARCH),no)
+$(error $(ARCH) is not a supported target.)
+endif
+
+# Unless USECROSS is set, fall back to the native toolchain.
+ifndef USECROSS
+CROSS :=
+endif
+
+ifdef NOSMP
+DEFS += -D_LL_NOSMP
+endif
+
+# C++ prohibits dereferencing a NULL non-POD pointer (and thus
+# prohibits using offsetof on non-POD types, even though there's no
+# good reason to disallow it). Some headers use offsetof on non-POD
+# types (typically, the types are non-POD only because they have a
+# constructor to initialize fields to a known state (or with
+# passed-in values); hardly a reason to make it completely non-POD
+# and throw out a bazillion otherwise legal actions).
+#
+# Thus, since I know of no way (such as a #pragma) for code to
+# disable a warning itself for a specific region of code, everything
+# needs to disable this warning. On one hand, I consider it a bogus
+# warning, and thus it's not that big of a deal. On the other hand,
+# having the warning suppressed means you won't know if your code
+# will produce the warning in other environments without actually
+# trying it there. Oh well; blame the C++ and/or GCC people, not me.
+#
+# This warning option requires at least GCC 3.4. If you want to use
+# an older compiler, remove this and live with the warnings (as well
+# as whatever other unknown issues you encounter).
+
+CXXWARN += $(WARN) -Wno-invalid-offsetof
+
+ifndef BUILDTYPE
+BUILDTYPE := user
+endif
+
+BUILDNAME := build
+BASEBUILDDIR := $(TOP)/$(BUILDNAME)
+ARCHBUILDDIR := $(TOP)/$(BUILDNAME)/$(ARCH)
+
+# Architecture-independent components (ARCHINDEP=y) share one build
+# tree across all targets; everything else builds per-arch.
+ifeq ($(ARCHINDEP),y)
+BUILDDIR := $(BASEBUILDDIR)/$(BUILDTYPE)/$(COMP)
+else
+BUILDDIR := $(ARCHBUILDDIR)/$(BUILDTYPE)/$(COMP)
+endif
+
+UTILBUILDDIR := $(BASEBUILDDIR)/build/utils
+SERVERDIR := $(BUILDDIR)/include/servers
+
+IFACES := $(BASEBUILDDIR)/build/idl/ifaces
+GENINCLUDES := $(ARCHBUILDDIR)/build/include/generated
+
+# BUILDCC/BUILDCXX compile host-side tools; CC/CXX target the
+# cross toolchain selected above.
+BUILDCC := g++
+BUILDCXX := g++
+CC := $(CROSS)g++
+CXX := $(CROSS)g++
+AS := $(CROSS)as
+LD := $(CROSS)ld
+STRIP := $(CROSS)strip
+DEFS += -D_LL_ARCH_$(ARCH) -D_LL_ARCH=$(ARCH)
+MKDIR := mkdir -p
+MV := mv
+RM := rm -f
+RMDIR := rm -rf
+LN := ln -s
+IDLC := $(BASEBUILDDIR)/build/idlcomp/idlc
+TOUCH := touch
+BISON := bison
+FLEX := flex
+
+# If you want to cross-compile idlc, set BUILD_ENDIAN to LE or BE
+# and build only the idlc component. Also, set BUILDCXX to the
+# cross compiler.
+#
+# NOTE: these $(shell ...) probes compile and run a small host
+# program on every make invocation unless the result is supplied
+# in the environment or on the command line.
+
+ifndef BUILD_ENDIAN
+$(shell $(MKDIR) $(UTILBUILDDIR))
+$(shell $(BUILDCC) $(TOP)/utils/buildendian.c -o $(UTILBUILDDIR)/buildendian)
+BUILD_ENDIAN := $(shell $(UTILBUILDDIR)/buildendian)
+endif
+
+ifndef BUILD_BFENDIAN
+$(shell $(MKDIR) $(UTILBUILDDIR))
+$(shell $(BUILDCC) $(TOP)/utils/buildbfendian.c -o $(UTILBUILDDIR)/buildbfendian)
+# BUGFIX: run the buildbfendian probe compiled just above; this
+# previously ran "buildendian", so BUILD_BFENDIAN reported the host's
+# byte order instead of its bitfield order.
+BUILD_BFENDIAN := $(shell $(UTILBUILDDIR)/buildbfendian)
+endif
+
+BUILDDEFS += -DBITFIELD_$(BUILD_BFENDIAN)
+
+endif
--- /dev/null
+Method Invocation:
+ Caller:
+ a0: object ID
+ a1: method ID
+
+ a2: pointer to args (low addr to high, padded as in a struct):
+ Out arrays can be preallocated. Scalar outs are ignored,
+ but space is made for them. NULL array params and arrays
+ which are larger than the preallocated buffer are
+ allocated by the callee. Arrays are passed as a pointer
+ and a 64-bit element count, in that order. Objects are
+ passed as pointers.
+
+ Call a special symbol TBD to invoke the method.
+
+ Upon return:
+	params on stack, with out params filled in; in params may be clobbered
+
+ t0-t11, at, ra clobbered
+ v0: pointer to exception, or NULL if none.
+ a2: pointer to args, with out params filled in; in params
+ may be clobbered
+
+ Callee:
+ params on stack (low addr to high), ids replaced with pointers,
+ at least 8 bytes of spare space beyond the high element
+
+ a0: object pointer
+ a1: pointer to caller information struct, if such was
+ requested
+ ra: return address
+
+ Upon return:
+
+ params on stack (low addr to high), in params may be clobbered
+ t0-t11, at, ra may be clobbered
+ v0: pointer to exception, or NULL if none.
+
+Stack:
+ sp is stack pointer, grows down, decrement before store
+
+Object structure:
+ The object ID is stored as a 64-bit quantity at an offset
+ specified by calling a method TBD.
+
+Wrapper object creation:
+ The function to create wrapper objects is specified by calling a
+ method TBD. The function shall conform to the local ABI, and
+ takes an ID as a 64-bit integer as the first parameter, and a
+ pointer to the class as the second. It returns a pointer.
+
+ Wrapper objects may be preemptively declared to avoid infinite
+ loops by calling a method TBD.
+
+Struct padding:
+ All fields are padded so that basic types are naturally aligned.
--- /dev/null
+Function Calls and In-Process Method Invocation:
+ SysV i386 ABI
+
+Out-of-Process Method Invocation:
+ Caller:
+ eax: object ID
+ ecx: method ID
+
+ edx: pointer to parameter info block (PIB), described below
+
+ Call the 32-bit address stored at 0x7fff0000 to invoke the method.
+
+ Upon return:
+ ebx, esi, edi, ebp, esp: preserved
+ eax: pointer to exception, or NULL if none.
+ If there is an exception, the user part of the syscall
+ function will search for an exception handling function
+ that covers the calling address. If none is found,
+ it will assume that it is a language without exception
+ handling, and return the exception to the caller in eax.
+ ecx: clobbered
+ edx: pointer to args, with out params filled in; in params
+ may be clobbered. This will be the same pointer as
+ was passed in by the caller.
+
+ Callee:
+ params on stack (low addr to high), ids replaced with pointers,
+ at least 4 bytes of spare space beyond the high element
+
+ eax: object pointer
+ edx: pointer to caller information struct, if such was
+ requested
+ ecx: return address
+
+ Upon return:
+
+ params on stack (low addr to high), in params may be clobbered
+ eax: pointer to exception, or NULL if none.
+ ebx, esi, edi, ebp, esp: should be preserved
+ ecx, edx: may be clobbered
+
+Stack:
+ esp is stack pointer, grows down, decrement before store
+
+Object structure:
+ The object ID is stored as a 32-bit quantity at an offset
+ specified by calling a method TBD.
+
+Wrapper object creation:
+ The function to create wrapper objects is specified by calling a method
+ TBD. The function shall conform to the local ABI, and takes an ID as a
+ 32-bit integer as the first parameter, and a pointer to the class as
+ the second. It returns a pointer.
+
+ Wrapper objects may be preemptively declared to avoid infinite loops by
+ calling a method TBD.
+
+Struct padding:
+ All fields are padded so that basic types are naturally aligned.
+
--- /dev/null
+The PolIntOS project contains code from various sources and under
+various licenses. Many of these licenses require that binary
+distributions contain attribution and/or the full license text; this
+is where such things go. This file only applies to the base PolIntOS
+project; add-on components which have been separated out due to
+license compatibility issues or for other reasons will have their
+attributions within the component in question.
+
+Original code written by Scott Wood
+===================================
+
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+Portions of the C library which come from newlib
+================================================
+
+/* Copyright (c) 2002 Red Hat Incorporated.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ The name of Red Hat Incorporated may not be used to endorse
+ or promote products derived from this software without specific
+ prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
--- /dev/null
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
--- /dev/null
+These are some of the differences between CORBA and this object
+model (this list is not meant to be exhaustive):
+
+No preprocessor in the IDL.
+
+No double-extended floating point type (most architectures don't
+support it, and its main purpose is extra precision for temporary
+calculations, where RPC is somewhat irrelevant).
+
+No fixed point type, at least not as a basic type. Perhaps as a
+value type...
+
+All characters are Unicode; if you need to pass characters or strings
+in ISO Latin-1 and/or other character sets, use the octet type.
+
+A string type exists, which holds characters in a UTF-8 encoding.
+
+No "any" type (use objects that encapsulate basic types instead (or
+maybe built-in value types?)).
+
+No unions.
+
+Sequences are not a special case; they are merely arrays whose
+minimum and maximum lengths are not equal.
+
+No multi-dimensional arrays.
+
+No value types for now (though they look interesting). At the
+moment, I'm leaning towards implementing this as a binding between a
+class and a struct.
+
+Instead of "oneway", there's "async". The basic idea is the same,
+except that async method invocations have the same execution
+guarantees as normal methods (that is, it may never complete because
+the remote machine is down, or the remote process is hung; however,
+it will never be dropped due to things like packet loss).
+
+There is no return type (only out parameters). This may change.
+
+No attributes (yet).
+
+Structs can inherit from other structs, and this hierarchy can be
+used for exceptions. All structs inherit from the root Struct type.
+
--- /dev/null
+General
+=======
+
+Each IDL namespace (as opposed to an interface, struct, etc. namespace)
+is mapped to a C++ header file and a directory. The name of the
+directory is the name of the namespace; the name of the file is the
+namespace name, followed by ".h". The header file and the directory
+are both part of the same directory; the former does not go in the
+latter. The directory contains sub-IDL-namespaces; if there are no
+such namespaces, the implementation is permitted, but not required, to
+create an empty directory.
+
+Each sub-IDL-namespace's header file is #included from its parent
+header file, so that #including the top-level header file shall be
+sufficient to access any descendent namespace.
+
+Each header file shall contain protection against being #included
+multiple times. This protection may not interfere with that of any
+other namespace.
+
+All identifiers and preprocessor symbols beginning with _IDL_CPP_ are
+reserved for use by the language binding.
+
+Types
+=====
+
+IDL types are mapped to C++ types as follows:
+
+ IDL Type C++ Type
+ ======== ========
+
+ bool uint8_t
+ octet uint8_t
+ short int16_t
+ ushort uint16_t
+ int int32_t
+ uint uint32_t
+ long int64_t
+ ulong uint64_t
+ fshort float
+ flong double
+ char uint8_t (UTF8)
+
+FIXME: currently char simply acts as octet; should it refer to an
+actual character, and thus be uint32_t? If so, then unless the
+language supports UTF8 natively, an array of char would still be an
+array of octets, which could be confusing... Alternatively, get rid
+of char as a basic type, and use a typedef of octet to indicate that
+this particular octet is UTF8.
+
+Namespaces
+==========
+
+IDL namespaces are mapped directly to C++ namespaces. Symbols
+declared inside a struct or interface which are not methods or
+non-const data members will go into a parallel namespace, which is
+the type-namespace name with "_ns" appended.
+
+Enumerations
+============
+
+As the size of a native C++ enum type is not guaranteed, and since
+they are not type-safe or namespace-safe, IDL enums are mapped to C++
+structs. Each enumeration value is declared inside the struct as an
+anonymous enum value, and there is a _val entry which is an unsigned
+integer of the smallest type that can hold the IDL enum size.
+
+Bitfields
+=========
+
+An IDL bitfield is mapped to a C++ struct using field width
+specifiers on the members. Integral members are mapped directly to
+C++ unsigned integers of the smallest type that can hold the IDL
+bitfield size, with a field size specifier.
+
+When a bitfield is used as the type of a bitfield entry, it cannot be
+directly mapped, as C++ does not allow non-integral types to have
+field size specifiers. Instead, get and set methods are provided to
+pack and unpack the data, and the contents of the bitfield type are
+directly inlined in the enclosing bitfield. If the field name is foo
+and the type is Foo, then the methods will be "Foo get_foo() and
+set_foo(Foo foo)". If a field name begins with "set_" or "get_", it
+will be preceded with an underscore for the field itself (but not for
+the set or get methods). Thus, a field called set_foo in IDL would
+become _set_foo, but a get method (if required) would be get_set_foo,
+not get__set_foo. The underscore prefix happens regardless of
+whether the field requires get and set methods.
+
+With an embedded bitfield, the inlined fields have the sub-bitfield
+name prefixed to the field name. Thus, if a bitfield Foo with fields
+a and b is included in a bitfield Bar as fields c and d, then Bar will have
+fields c_IDLNS_a, c_IDLNS_b, d_IDLNS_a, and d_IDLNS_b, and methods
+set_c(Foo), Foo get_c(), set_d(Foo), and Foo get_d(). The IDLNS is
+ugly, but there's no way I know of to use a C++ namespace mechanism
+without introducing unwanted padding (even __attribute__((packed))
+will byte-align a substruct).
+
+Likewise, embedded enumerations cannot use the actual enumeration
+struct as a C++ bitfield member; they are treated as simple
+integers instead. Get and set methods are not needed, as enums will
+convert to integers, but they may be added in the future to provide a
+type-safe alternative to direct assignment.
+
+Objects
+=======
+
+A reference to an object has the name of the type, with no pointer or
+reference marker. If uninitialized, they default to NULL. Methods
+are called in the same way as on a C++ class, using the dot, not the
+arrow. To increment an object's reference count, call the retain()
+method. To decrement it, call the release() method. Weak references
+and autorelease pools will be added later.
+
+Classes
+=======
+
+The implementor of a class must #include the generated .h file for
+the class in a public: portion of the class definition. The class's
+constructor must call init_iface() before any object references to
+the object are used. The generated footer.cc file must be included
+in exactly one non-header file. It is recommended that this be the
+file in which the methods are defined, so that they can be inlined by
+the stubs; however, the only requirement is that it have access to
+the class definition and any interface headers the implemented
+interfaces reference.
--- /dev/null
+When a non-globally-qualified identifier is used that could
+potentially refer to more than one symbol in different namespaces,
+the following rules are used to determine which, if any, is assumed
+to be the intended symbol:
+
+The current stack of parent namespaces (not parent types) is
+searched, starting with the current namespace and proceeding outward.
+At each level, the following searches are made in order:
+
+1. All symbols declared directly in the namespace, including aliases
+and symbols imported with a specific using statement.
+
+2. Symbols imported by a using namespace statement at that level. If
+more than one imported symbol is matched, it is an error. Symbols
+imported by a using statement take effect immediately, and are used
+for subsequent imports, even in the same statement. Lookups for any
+purpose *other* than a using statement use all importations,
+regardless of whether they come before or after the lookup in the
+file.
+
+Symbols defined in namespaces of inherited types are not imported, as
+I felt that for IDL, the benefit wasn't worth the implementation
+complexity (there are some chicken-and-egg problems; aliases declared
+in the namespace need to be looked up early so that alias chains can
+be formed, but inherited types need to be looked up earlier so that
+they can be used for namespace searches, but types can be inherited
+using alias names...).
+
+Note that importing a namespace does not bring the symbol name of that
+namespace into the current namespace along with its context. In
+certain circumstances, this could lead to problems if the name used
+to refer to the namespace is shadowed by an imported name. This
+won't necessarily cause an error to be reported, if the name that was
+used for importing the namespace was imported or declared in an outer
+namespace, and the alternate name is valid in the context used. In
+general, you should not use whole-namespace imports except for
+closely related bits of code/interfaces where you can be sure that
+nothing unintended is getting imported.
+
+At some point, I may change this so that imported namespace names
+(including the path as used to import) do get added as private
+aliases. If so, it could apply to inherited types as well.
+
+If a match is found at one rule, the search stops, and no further
+rules or levels are considered. If an error occurs, the search also
+stops (i.e. a proper match at a later level does not undo the error).
+
+If no symbols were found in the above steps, the global namespace is
+searched.
+
+Only the first component of a namespace-qualified name is looked up;
+further components are not used for disambiguation.
+
+Note that when using a namespace either in a namespace statement, or
+when creating a type using a qualified identifier, no searching is
+done, but the current namespace is assumed (or global, if there's a
+leading ".."). If the namespace does not exist, it is created.
+
+Example:
+
+namespace A {
+ struct D {
+ };
+
+ struct F {
+ };
+
+ // A.G is declared as a namespace; no attempt is made to look for
+ // a global G.
+ struct G.Q {
+ };
+}
+
+struct B {
+ struct C {
+ struct X {
+ };
+ };
+
+ struct G {
+ };
+
+ struct H {
+ };
+
+ struct I {
+ };
+};
+
+namespace C {
+ using A.*;
+
+ struct D {
+ };
+
+ struct E {
+ };
+
+ struct F {
+ };
+
+ struct H {
+ };
+
+ struct I {
+ };
+
+ struct B.E {
+ struct F {
+ };
+
+ struct E {
+ };
+
+ B b; // Error: Uses C.B, rule #1 once C is reached,
+ // but C.B is not a type
+ D d; // Uses C.D, rule #1 once C is reached
+ B.E be; // Uses C.B.E, rule #1 once C is reached
+ E e; // Uses C.B.E.E, rule #1 in current namespace
+ E.E ee; // Error, as the first E matches C.B.E.E, and
+ // there is no C.B.E.E.E. The extra .E is
+ // not used for disambiguation.
+ F f; // Uses C.B.E.F, rule #1 in current namespace
+ G g; // Error, Uses A.G, rule #2 once namespace C is
+ // reached, but A.G is not a type
+ H h; // Uses C.H, rule #1 once namespace C is reached.
+ };
+}
+
+struct D {
+ // Imports B and B.C, not B and C. If it were "using C.*, B.*",
+ // then it would import C and C.B.
+
+ using B.*, C.*;
+ using ..B.I;
+
+ struct E {
+ B b; // Error: Uses C.B, rule #2 once D is reached, but C.B
+ // not a type. Only the contents of namespaces are
+ // imported, not the names themselves.
+ C.D cd; // Error. For the same reason as above, C refers
+ // to B.C, not C. There is no D in B.C.
+ C.X cx; // Uses B.C.X.
+ D d; // Uses C.D, rule #2 when D is reached. D itself
+ // is declared in the global namespace, which doesn't
+ // get reached because a match is found sooner.
+ E e; // Uses D.E, rule #1 once D is reached. C.E would
+ // have been matched by rule #2 if D.E did not exist.
+ F f; // Uses C.F, rule #2 once D is reached. A.F is not
+ // matched, as importation is not transitive.
+ G g; // Uses B.G, rule #2
+ H h; // Error, both B.H and C.H match on rule #2 once
+ // D is reached. B does not get precedence for
+ // coming first, or because the C importation
+ // comes after this lookup.
+ I i; // Uses B.I, as specific-symbol imports are treated
+ // as symbols declared in this namespace, rather than
+ // imports (and thus B.I has precedence over C.I).
+ };
+
+ int I; // Error; this conflicts with the specific-symbol
+ // importation of B.I.
+
+ // Now C is imported. This importation is valid through the
+ // entire namespace (except for prior using statements), not just
+ // after this point.
+
+ using ..C.*;
+};
+
+
+It goes without saying that it is not recommended that code be
+written which relies excessively on these rules; they are mainly
+intended so that:
+
+1. Local namespaces are not unduly restricted by what some imported
+library declares, and
+
+2. If there is an ambiguity, all compilers will handle it the same
+way.
--- /dev/null
+IDs are used to identify specific objects and methods to the ORB. Each
+address space has its own object ID space, but method IDs are a separate ID
+space that is shared by all users of an ORB. Each interface does *not*
+form its own ID space for methods. Classes, interfaces, and struct types
+are merely objects of specific classes, and thus share the object ID space.
+
+Object IDs
+==========
+
+Applications interact with objects via pointers (or references, or whatever
+else is appropriate for the language), not IDs. This includes structs
+intended for marshalling to another address space. To deal with this, the
+ORB accepts all object references in pointer form, rather than ID form.
+For object references, the language mapping ensures that the ID can be
+found from the pointer in a manner consistent with the platform ABI.
+Typically, it is stored as some fixed offset of the first word of the
+entity pointed to. Structs do not have IDs; see the "memory-management"
+file for information on how they are marshalled across address spaces.
+
+When presenting the object to the destination address space, the ORB needs
+to convert the local object ID to the object ID in the server's ID space.
+If the object has not yet been used in that address space, there will be no
+local ID (or pointer) associated with it. If this is the case, an object
+stub will be created by the ORB.
+
+To maintain the ID->pointer mapping, the ORB will maintain a pointer object
+for every local ID in an ID space that is not the "home" of the real
+object. These objects, whose memory is chargeable to the address space
+they were created for, contain the application address associated with the
+ID, as well as a pointer to the real object. The pointer object may also
+hold cached results of permission checks.
+
+For every real object, the ORB maintains a similar internal object, but
+instead of a pointer to a real object, it contains all the information
+needed for the ORB to carry out requests on the ID.
+
+Method IDs
+==========
+
+Each method has an ID which is global to the ORB, which is used when
+specifying which method to invoke on a given object. A method will also
+have an interface-local index "id", which can be used internally for things
+such as indexing into a pointer-object's cached permission checks, but such
+indices are not application-visible. The only pointer associated with a
+method is its entry point, which is not visible to the client.
+
--- /dev/null
+1. Method Parameters
+====================
+
+1.1 Overview
+============
+
+The writeability, sharedness, and lifetime of memory passed by reference
+as a parameter (in or out) depends on the parameter attributes, as well as
+whether the method is asynchronous. The implementation of the semantics
+will be different for in-process and remote methods, and not all
+restrictions are enforced as strictly for in-process methods, but the
+semantics when the rules are followed must be the same for both.
+
+The data type of parameter also affects how it is handled. Built-in
+datatypes, bitfields, enums, and inline structs and arrays are passed
+directly by value (or, depending on the ABI, simple reference-valid-
+until-end-of-method for out parameters), and thus do not require memory
+management. Object references are reference counted both at the ORB and
+process level, and are unique in that the client data structures are
+read-only and uncopyable (because a unique object needs a unique address)
+within an address space; most of this section does not apply to them.
+
+That leaves non-inline structs and arrays. The ideal way of treating
+these depends on how they're being used. Simple, small structs and arrays
+would benefit from being passed as a simple pointer (plus a length field
+in the case of arrays), with the method copying anything it wants to keep
+and/or change. For async methods, the caller would need to avoid
+modifying referenced memory until it knows the method has been executed
+(in the case of remote methods, it only matters until the ORB has COWed or
+copied the memory, but client code should not depend on this, or it will
+not work with an in-process server). This is the default behavior if no
+parameter attributes are specified. With this type of parameter, there
+are no restrictions on what allocator the memory being passed came from
+for in parameters. "Out" and inout parameters will be discussed later.
+
+The only major complexity here is in how the duplicate is made. Struct
+duplications will need to do a deep copy without being confused by
+reference loops; the stubs will need to provide a "duplicate" function
+that does this (possibly using a hash table or other associative array to
+identify reference loops if IDLC determines that they are possible with
+that particular type of struct).
+
+To avoid a double-copy, out-of-process methods may want to merely ask the
+ORB to give it full, permanent access (via COW if not already copied) to
+the memory. This may be hard to implement, though, as it requires the
+server to know that the data came from an out-of-process implementation
+(even if it's been passed to other functions, some of which may be
+in-process object-model methods). This optimization may only be useful
+for large amounts of data that is not likely to have most of its pages
+modified; if it is likely to be heavily modified, then COW doesn't help
+much, and may hurt (and if it's large, it probably hasn't already been
+copied). It may be better to not implement this optimization, and instead
+recommend that the user use a parameter attribute to deal with the
+problem.
+
+1.2 Parameter Attributes
+========================
+
+With larger arrays and complex data structures, one would often benefit
+from being able to avoid the copy altogether. This can be accomplished by
+altering the interface semantics. All of the parameter attributes but
+"copy" do this, and thus cannot be changed without breaking interface
+compatibility. None of the current parameter attributes can be combined
+with any other current parameter attribute.
+
+1.2.1 Default Semantics
+=======================
+
+If no attribute is specified, "in" parameters are visible only until the
+method returns, and are read-only. There will be a "duplicate" function
+provided by the language binding for the server to use if it wants to
+retain and/or write to the data. For "small" data (the threshold needs to
+be empirically determined), it just makes a copy. For "large" data, the
+pages will be copy-on-write mapped (unless the caller asks for an
+immediate copy). The only real reason not to use the immediate flag for
+small data (as determined by the programmer) as well (rather than have a
+threshold) is so that the threshold can be tunable based on the relative
+performance of copies versus traps on a given system. It'd also be nice
+if the programmer could ask a profiler to determine whether large data
+should be COWed or copied immediately on a per-call basis.
+
+When the "duplicate" function is called, a copy-on-write mapping of the
+data will be created. Edge data will be overmapped regardless of page
+type, but the overmap status will be retained (so that edge pages will not
+be overmapped into some other address space), though read overmap will be
+promoted to read/write overmap, as the extra data in the copy will not be
+used anymore. There will be an option to the "duplicate" function to
+create fully overmappable pages by copying the edge data and zeroing the
+rest of the edge pages (in case the caller wants to share the data).
+
+1.2.2 Copy
+==========
+
+The "copy" attribute affects only the implementation; changing it does not
+break interface compatibility (and thus require a new GUID). As such, the
+use of this attribute is specified in the CDL rather than the IDL.
+Language bindings that do not require a CDL file will provide some way of
+specifying copy semantics directly from the implementation language.
+
+This attribute directs the ORB to automatically make a copy (possibly via
+COW, but no read-only or shared mappings) of the parameter. For
+in-process invocation, methods with any "copy" parameters will need to go
+through a stub to do the copy before calling the real method.
+
+1.2.3 Shared
+============
+
+The "shared" attribute declares that the method implementation and caller
+will treat the data as a read/write shared memory region, lasting beyond
+the end of the method. The caller must ensure that all segments of data
+provided (including every nested struct, or struct/array in an array of
+structs/arrays) either begins and ends exactly on a page boundary, or has
+all partial pages marked for read/write overmap. For out-of-process
+methods, an exception will be thrown if this is not the case. For
+in-process methods, you'll merely go to hell for writing bugs that won't
+show up until someone hands your code a reference to an out-of-process
+implementation. All data must be under the management of the ORB memory
+manager.
+
+The shared region is terminated when the caller or callee frees the memory
+using the ORB memory manager. This requires calling some function that
+knows whether to actually free it (in the out-of-process case), or release
+a reference or something (in the in-process case). For arrays, where
+we're already using a struct with pointer and length, adding a reference
+count pointer wouldn't be a big deal. Struct pointers, OTOH, are
+currently plain pointers, and adding an indirection struct with pointer
+and reference pointer would be ugly, but doable. The alternative is to
+have the memory manager look up the memory fragment and find the refcount
+in its internal data structures, which would require a lookup for every
+reference count operation.
+
+1.2.4 Push
+==========
+
+The "push" attribute transfers the memory region to the destination,
+unlinking it from the source address space. The source memory region will
+be unmapped for out-of-process invocations; for in-process invocations,
+the memory region will simply belong to the callee, and the caller must
+not reference it any more. Like "shared", all data fragments must either
+begin and end on a page boundary, or be in a page with read/write overmap
+enabled. It is also required that every page being pushed be under the
+management of the ORB allocator.
+
+1.2.5 Inline
+============
+
+When used as a parameter attribute (either explicitly or as part of the
+type definition), "inline" specifies that the struct or array will be
+passed directly as a value parameter. A NULL reference cannot be passed,
+and for out parameters, the method cannot return a reference to an
+existing struct or array, but instead must fill in the reference given.
+
+The "inline" attribute is similar to "copy", except that no in-process
+stub is required because it is part of the interface (though a stub may be
+required in certain languages if they do not provide automatic copying of
+value-passed structs/arrays).
+
+An inline array cannot be of variable length, and may be treated
+differently from other arrays in the language binding (e.g. in C++, inline
+arrays are bare language arrays, and do not use the Array or MutableArray
+wrappers).
+
+The "inline" attribute can also be used on members of a struct, to
+indicate that the member is embedded directly in the outer struct, rather
+than linked with a pointer.
+
+1.2.6 Immutable
+===============
+
+The "immutable" attribute is similar to "const" in C/C++ ("const" in
+IDL is used only for compile-time constants), and specifies that the
+array or struct cannot be modified through this particular reference.
+It is the default for parameters when neither "shared" nor "push" is
+used ("copy" and "inline" parameters will accept immutable references
+on the caller side, but will produce a mutable copy for the server
+side). It may be specified without effect when it is the default.
+
+Immutable "shared"/"push" parameters will result in read-only
+mappings in the destination address space, though for "push"
+parameters, the process will have permission to enable writes (this
+is intended for use by the ORB memory manager when the memory is
+freed).
+
+The "immutable" attribute may also be used on members of a struct.
+
+1.3 Asynchronous methods
+========================
+
+Most of the semantics are the same for asynchronous methods; however, the
+points at which the method begins and ends are less clear. As far as the
+ORB is concerned, an async method begins when it processes the message.
+When invoking an async method, there should be a mechanism to get a handle
+to track the progress of the invocation. This can be used by the caller
+to try to cancel the method on a timeout, which can only be done if the
+message has not yet been accepted by the recipient. Once the message has
+been accepted, in the in-process, non-"copy" case, the caller must not
+touch the data after this point until it receives a message from the
+callee that it is finished. In the out-of-process case, if a caller loses
+patience with the callee, it can free the memory (thus making it exist
+only in the callee's address space, non-shared).
+
+1.4 Out parameters
+==================
+
+When a struct or array is being returned via an out or inout parameter,
+there is no end of method on which to base reference lifetime. As such,
+if neither "shared" nor "inline" is specified, an out parameter is treated
+as "push". The default of "push" only applies to the out half of an inout
+parameter; in general, use of inout should probably be limited to value
+types and parameters that use "push" in both directions so as to avoid
+potentially confusing semantics.
+
+To return static data that does not need to be freed, out parameters can
+use the "copy" implementation attribute. The interface semantics will
+still be "push", but the ORB (or a wrapper function for in-process calls)
+will allocate a pushable buffer and copy the data into it. If the static
+data is managed by the ORB memory manager, it will reference count the
+page rather than make a copy if the buffer is of sufficient size.
+
+1.5 Exceptions
+==============
+
+Exceptions are thrown as copy/push out parameters. This will often mean
+unnecessary copies at in-process IDL method boundaries, but exceptions
+should be small and infrequently thrown, and usually not managed by the
+ORB memory manager except across method boundaries.
+
+1.6 Unmet needs
+===============
+
+It is currently impossible to specify attributes (other than "immutable"
+and "inline") on individual struct members. This would be useful to pass
+a struct normally that contains a status code or other metadata along with
+a pushed or shared buffer, without making the outer struct also pushed or
+shared. It would also be useful to pass a pushed buffer through some
+fixed superstruct such as a NotifierInfo.
+
+2. The ORB Memory Manager (ORBMM)
+=================================
+
+The ORB memory manager keeps track of memory allocated by the ORB during
+an out-of-process method invocation. It is also used for allocations made
+by user code for memory that may need to be freed by another component in
+the same address space (such as when using the shared or push attributes).
+
+A reference count is kept on each page, so that shared-within-a-process
+mappings must be released by both caller and callee before the memory is
+freed. Passing a chunk of data through a "shared" parameter to in-process
+method increments the page's reference count; this requires a memory
+manager lookup for every such parameter. Because of this, consider using
+"push" instead if sharing is not required.
+
+In the out-of-process case, each mapping is freed separately, and the
+kernel handles reference counting the physical page.
+
+2.1 Methods
+===========
+
+Each language binding shall provide a mechanism by which code may call the
+following functions on a given type of array or struct. The prototypes
+are for C++; other language bindings express these methods in whatever
+form is most appropriate. In C++, the ORB memory manager is at
+System::RunTime::orbmm, which is a pointer to an instance of
+System::RunTime::ORBMM.
+
+2.1.1 alloc
+===========
+
+void *ORBMM::alloc(size_t size, ORBMM::AllocGroup *group = NULL);
+
+Allocate the memory required for the given type (and the given array size,
+if the type is an array). A group handle may be passed; if not, no page
+will contain more than one allocation. The reference count on the page is
+incremented if a page has been reused and per-object refcounts are not
+supported; otherwise, the object's reference count is one. If the
+allocation spans multiple pages, it will be tracked as an "object", so
+that each page will have its reference count incremented and/or
+decremented when appropriate.
+
+The implementation may, but is not required to, track reference counts on
+a per-page basis rather than per-object. The former will generally be
+more efficient, but will preclude the reuse of an object's memory upon
+release until the entire page is released.
+
+Alternative forms:
+Type *obj = new(orbmm) Type;
+Type *obj = new(orbmm) Type[];
+Type *obj = new(orbmm, group) Type;
+Type *obj = new(orbmm, group) Type[];
+
+2.1.2 retain
+============
+
+void ORBMM::retain(Region region);
+
+Increment the reference count on the specified object.
+
+The region must refer to only one ORBMM object; the implementation may,
+but is not required to, throw an exception if this rule is violated. If a
+region smaller than the object is retained, it will not prevent other
+pages in the region from being freed.
+
+2.1.3 release
+=============
+
+void ORBMM::release(Region region);
+
+Decrement the reference count on the specified object, freeing it if it
+reaches zero.
+
+It is allowed, but not required, that space in multi-object groups be
+reused when freed, if the same group is used to allocate new objects.
+This is only possible if reference counts are kept on a per-object basis
+rather than per-page.
+
+The region must refer to only one ORBMM object; the implementation may,
+but is not required to, throw an exception if this rule is violated. If a
+region smaller than the object is released resulting in a reference count
+of zero, portions may be freed prior to the rest of the region's reference
+count reaching zero.
+
+2.1.4 super_retain
+==================
+
+void ORBMM::super_retain(Region region);
+
+Increment the reference and super-reference counts on the specified
+object. If the reference count ever goes below the super-reference count,
+an exception is thrown. This mechanism is intended to ease debugging
+reference count problems, by turning memory corruption into an exception.
+
+It would typically be used when a given object is not intended to be
+released until the program exits (or some well-defined cleanup procedure
+is done), such as program and module code and static data. It should also
+be used when a mapping is created using mmap() or other higher-level
+function, so as to be able to detect if such a reference is released
+through release() rather than through the high-level mechanism.
+
+The region must refer to only one ORBMM object; the implementation may,
+but is not required to, throw an exception if this rule is violated.
+
+2.1.5 super_release
+===================
+
+void ORBMM::super_release(Region region);
+
+Decrement the reference and super-reference counts on the given object.
+
+The region must refer to only one ORBMM object; the implementation may,
+but is not required to, throw an exception if this rule is violated.
+
+2.1.6 create_group
+==================
+
+ORBMM::AllocGroup *ORBMM::create_group();
+
+Create a group handle that can be passed to the alloc function to pack
+multiple allocations into the same page(s).
+
+2.1.7 destroy_group
+===================
+
+void ORBMM::destroy_group(ORBMM::AllocGroup *group);
+
+Free the memory associated with the group handle returned by create_group.
+The allocations made under the group are unaffected, and must be released
+separately.
+
+2.1.8 add_region
+================
+
+void *ORBMM::add_region(System::Mem::Region region);
+
+The ORB memory manager can manage reference counts of pages that were not
+allocated using ORBMM. This can be used to refcount non-anonymous
+mappings (and thus make them usable with parameters that require ORBMM
+memory). It can also be used on static pages that will never be freed
+until the program exits.
+
+The add_region method places a non-ORBMM controlled region under ORBMM
+control. The ORBMM may use the existing mapping, or it may remap the
+pages into its own region of the virtual address space. The address that
+it uses will be returned. The entire region will be one ORBMM object.
+
+Upon a reference count of zero, the pages will be unmapped using
+System.AddrSpace.unmap().
--- /dev/null
+Parameter Info Block (PIB), all offsets in pointer-length words
+ Name Offset Meaning
+ buffer_size 0 Size of the destination buffer
+
+ The total number of bytes in all of the segments that require a
+ buffer to be created in the destination address space. This is
+ specified so that the kernel can allocate one large buffer for all
+ segments before traversing the segment list. When returning from a
+ method, the buffer size only includes buffers allocated by the
+ caller; "inout" segments where the caller specified a non-NULL ptr,
+ and the callee did not increase the length, are not included
+ (because the kernel does not need to allocate a caller-side buffer
+ for them).
+
+ objlist_ptr 1 Pointer to the object list
+ objlist_len 2 Length of the object list
+
+ The object list is a list of pointers into segment data describing
+ where object IDs can be found. When copying a segment to the
+ destination address space, it will convert all IDs (allocating a new
+ ID if necessary). The object list must be in order (first by
+ segment, then by address); an exception may be thrown if it is out
+ of order or if it contains invalid entries. Segments with object
+ IDs cannot have the Shared flag. Unmarshalling code should always
+ verify that any ID it expects is actually in the object list.
+
+ ptrlist_ptr 3 Pointer to the pointer list
+ ptrlist_len 4 Length of the pointer list
+
+ The pointer list, like the object list, is a list of pointers to
+ segment data. Each pointer pointed to must also point within
+ segment data, and will be modified by the ORB when copied to point
+ to the equivalent location in the destination address space. The
+ pointer list must be in order (first by segment, then by address);
+ an exception may be thrown if it is out of order or if it contains
+ invalid entries. Segments with internal pointers cannot have the
+ Shared flag (shared segments can still be pointed to, of course).
+ Unmarshalling code should always verify that any internal pointer it
+ expects actually points within a valid segment.
+
+ num_segments 5 Number of data segments
+ segment.ptr 6+n*3 Pointer to data segment
+ segment.len 7+n*3 Length of data segment in bytes
+ segment.flags 8+n*3 Attributes of data segment
+
+ Each segment describes data being transmitted to and/or from the
+ callee. For out segments, the caller may designate a buffer to hold
+ the data, or it may leave the ptr field NULL. The caller may
+ replace an out segment pointer with its own (it must do this if it
+ was NULL), and it may change the length of the segment. Except when
+ flags such as Push or Shared force the kernel to map, rather than
+ copy, the data, it will choose which method to use based on the
+ size, page-alignment, and overmap status of the segment.
+
+Segment Flags (see doc/orb/memory-management for more details):
+ In 0x01 Data is copied/mapped from caller to callee
+ Out 0x02 Data is copied/mapped from callee to caller
+ Shared 0x04 A permanent shared mapping is created
+ Push 0x08 The region is unmapped from the source and
+ transferred to the destination.
+ Inline 0x10 The callee cannot change the length of an
+ Out segment. Ignored for In segments.
+ Immutable 0x20 The segment is to be mapped read-only in
+ the destination.
+ Copy 0x8000 The segment is permanently copied into the
+ destination address space, with read/write
+ access (unless Immutable is set).
--- /dev/null
+# Build the System IDL interface tree with the IDL compiler (IDLC).
+# TOP is the parent of this directory (the tree root).  The shared
+# logic lives in ../Makefile.head and ../Makefile.tail, which are
+# expected to define BUILDDIR, IDLC, RMDIR, etc. -- not visible here.
+TOP := $(shell dirname `pwd -P`)
+COMP := idl
+BUILDTYPE := build
+NODEPS := y
+ARCHINDEP := y
+include ../Makefile.head
+
+# Every IDL source file under this directory, found at parse time.
+IDL := $(shell find . -name '*.idl')
+
+TARGETS := $(BUILDDIR)/ifaces
+
+# Regenerate the interface tree whenever any IDL file or the IDL
+# compiler binary itself changes.  The old output is removed first,
+# and on a failed compile the partial output is deleted so a later
+# make does not mistake it for up to date.
+$(BUILDDIR)/ifaces: $(IDL) $(IDLC)
+ @echo $(COMP): System IDL files
+ @$(RMDIR) $(BUILDDIR)/ifaces
+ @if ! $(IDLC) -o $(BUILDDIR)/ifaces -s System $(IDL); then \
+ $(RMDIR) "$@"; \
+ false; \
+ fi
+
+include ../Makefile.tail
--- /dev/null
+// AddrSpace
+//
+// These are the interfaces through which operations are performed on
+// virtual and physical address spaces.
+//
+
+namespace Mem;
+
+// There are two main kinds of mappable objects:
+// 1. Objects that can be mapped directly into physical address space.
+// This includes memory, device I/O, nested AddrSpaces, etc. Objects
+// with this capability implement the Mappable interface.
+// 2. Objects that are mapped by copying the content into RAM
+// (disks, remote objects, dynamically generated data, etc.)
+// These objects implement the Cacheable interface.
+//
+// Type #1 is simple; all it needs to do is translate mappable offsets
+// into physical. Type #2 requires an intervening Cache to manage the
+// backing RAM store. Memory-like type #1 objects should also implement
+// Cacheable so that they can be used remotely.
+//
+// Address spaces may be stacked, such that (for example) a debugger maps
+// an application which maps a file which maps a filesystem which maps a
+// RAID volume which maps several physical disks; only the disks would be
+// represented by Caches, everything else by AddrSpaces. Changes at any
+// level immediately (well, before the operation finishes) propagate up
+to the page tables of higher levels, and clone() will work at any level
+// (and can make either the old or new address space the anonymous-
+// memory-using "shadow" object). Stacked address spaces can also be
+// used to implement access control.
+
+// A contiguous range of addresses or blocks.  "inline" means the
+// struct is passed directly by value rather than by reference.
+struct Region inline {
+ // Both bounds are inclusive
+ ulong start, end;
+};
+
+// A Region together with an offset -- presumably the position within
+// the backing object that the region corresponds to; the exact
+// semantics are defined by each user of the struct (verify at call
+// sites).
+struct RegionWithOffset inline : Region {
+ ulong offset;
+};
+
+// Page access permissions, stored as a 3-bit field (one bit each for
+// Read, Write, and Exec).
+bitfield AccessFlags:3 {
+ // Allow reads to this page. On some platforms, this is assumed
+ // if writes are allowed, due to hardware limitations.
+ Read,
+
+ // Allow writes to this page.
+ Write,
+
+ // Allow execution of code from this page. On some platforms,
+ // this is assumed if reads are allowed, due to hardware
+ // limitations.
+ Exec
+};
+
+// Common base interface for mappable and cacheable objects, which
+// are addressed in units of fixed-size blocks.
+interface BlockObject {
+ guid: "4425AF91-52BE-11DA-BD60-000A95BB581A";
+
+ // Returns the current size in blocks of the object.
+
+ get_size(ulong size out);
+
+ // Returns the object's block size in bytes. Requests to map, read,
+ // write, etc. this object must be done with this granularity. The
+ // block size must be a power of two.
+
+ get_block_size(ulong block_size out);
+};
+
+// Finer grained mapping than the block size may be done via the virtual
+// address space, but the AddrSpace will always request object->physical
+// mappings that are a multiple of the block size.
+
+// An object that can be mapped directly into an address space
+// (type #1 in the overview at the top of this file).  It adds no
+// methods beyond BlockObject; implementing the interface is itself
+// the statement of the capability.
+interface Mappable : BlockObject {
+ guid: "B8E7A0DF-EAB6-11D9-BEFB-000A95BB581A";
+
+};
+
+// An object whose contents are mapped by copying them into RAM
+// through an intervening Cache (type #2 in the overview at the top
+// of this file).
+interface Cacheable : BlockObject {
+ guid: "BB4C729D-EAB6-11D9-ADA2-000A95BB581A";
+
+ // Add the pages in the specified region to the specified cache.
+ // The start and end of the region will be aligned to the larger
+ // of the page size and the block size.
+
+ fill(Cache cache, Region region) async;
+};
+
+// This is the Mappable used to map a Cacheable object.
+// Calling get_subspace on a Cache returns a pure Mappable; holders of
+// such a reference alone can only map it, not add or remove pages or
+// change its cacheable.
+
+interface Cache : Mappable {
+ guid: "BD9A04F5-EAB6-11D9-A420-000A95BB581A";
+
+ // Get and set the object mapped through this Cache. Any other
+ // methods when obj is null (or never set) throw an InvalidState
+ // exception. Calling set_cacheable while this Cache has any
+ // current mappings throws an InvalidState exception.
+
+ set_cacheable(Cacheable obj);
+ get_cacheable(Cacheable obj out);
+
+ // Add one or more pages to the cache. Addr must be page-aligned, and
+ // the size of buf must be a multiple of the page size.
+ // The "push" attribute transfers ownership of buf from the caller
+ // to the cache (see doc/orb/memory-management, section 1.2.4).
+
+ fill(ulong addr, octet[] buf push) async;
+
+ // Add one or more zero-filled pages to the cache. The region must
+ // start and end on a page boundary.
+
+ fill_zero(Region region) async;
+
+ // Remove one or more pages from the cache. The region must start and
+ // end on a page boundary. It is not an error to remove pages that are
+ // not in the cache.
+
+ remove(Region region);
+
+ // Wait until all of the pages in the region have been added to the
+ // cache. It is the caller's responsibility to ensure that the adding
+ // has been requested, and that blocking will not cause a deadlock.
+ // If there are multiple pages in the region, it is as if this method
+ // were called sequentially on each page; the kernel will not check
+ // whether previously checked pages have been removed while waiting
+ // for later pages.
+
+ block_on_region(Region region);
+
+ // Like block_on_region, but returning a blocker suitable for use
+ // with Thread.block_multi.
+
+ region_blocker(Region region, Proc.Blocker blocker out);
+};
+
+// Flags controlling memory allocation behavior: zeroing policy,
+// commitment, and locking against swap.
+bitfield AllocFlags {
+ Zero, // Zero out any freshly allocated memory.
+
+ Insecure, // It is not necessary to zero out the page after it
+ // is freed, unless the next allocator requests it.
+
+ Commit, // Commit the allocation to actual memory or
+ // swap, failing if this cannot be done.
+
+ Lock, // Only allocate actual memory, and lock it against
+ // swapping. Fails if not enough actual RAM is
+ // available (either in general or in the caller's
+ // locked RAM quota). If Lock is set, Commit is
+ // ignored.
+
+ NoZeroLocal, // Accept secure pages from the current address
+ // space without first zeroing. Ignored if Zero is
+ // specified. Overmap should be set to None when
+ // this is used, and every byte should be
+ // overwritten before data in any such pages is
+ // passed to another address space.
+
+ Paranoid, // Zero pages even when going to a NoZeroLocal allocator
+ // in the current address space. Use this for pages which
+ // are particularly likely to contain sensitive data.
+};
+
+// Flags controlling how a region is mapped into an address space
+// (see AddrSpace.map and AddrSpace.alloc_and_map).
+bitfield MapFlags {
+ Fixed, // Fail if the exact starting address is unavailable.
+ // Otherwise, if the supplied starting address is
+ // unavailable, the address space manager allocates a
+ // free region and returns it in "start". If this
+ // behavior is explicitly desired (as it usually is),
+ // "start" should contain all bits set, which is always
+ // invalid. For non-process (stacked) address spaces,
+ // this flag is treated as always set.
+
+ Replace, // Atomically replace any existing mapping with the new
+ // mapping, rather than fail. This flag is only
+ // meaningful if Fixed is set.
+
+ CopyOnWrite, // Share the mapped object only until it is written to;
+ // then, before the write takes place, copy the object.
+ // It is undefined whether this mapping will receive
+ // the copy or the original. Ignored for alloc_and_map.
+
+ AccessFlags access:3,
+ // These are the requested read/write/execute
+ // permissions on the mapping. A missing permission (or
+ // unmapped page) at any level in an address space stack
+ // will cause a MemoryFault, even if the page is mapped
+ // with the needed permissions at the top level. The
+ // map() call will not fail due to such a condition.
+
+ enum OverMap:2 {
+ None, // Never map non-argument memory to a callee's address
+ // space.
+
+ ReadOnly, // Allow read-only mappings of non-argument data within
+ // the same page as an argument into the callee's
+ // address space.
+
+ ReadWrite // Allow all mappings of non-argument data within the
+ // same page as an argument into the callee's address
+ // space.
+ } overmap
+};
+
+interface AddrSpace {
+ guid: "BF9D2070-EAB6-11D9-A2D2-000A95BB581A";
+
+ // All-bits-set sentinel meaning "no particular start address".
+ const ulong unspecified_start = 0xffffffffffffffff;
+
+ // Return the mappable associated with this address space, which can
+ // be used to allow another address space to stack with this one.
+ // The mappable handle only allows pages to be mapped; it does not
+ // allow any changes to the mappings, nor can a handle to the AddrSpace
+ // be obtained from it. This method must always return the same
+ // Mappable object when called on the same AddrSpace.
+
+ get_mappable(Mappable ma out);
+
+ // Create a new AddrSpace that is a copy-on-write clone of this AddrSpace.
+ // This method is used to implement fork() and in-memory file versioning,
+ // and could also be used to assist garbage collection and other purposes.
+ //
+ // By default, the old address space continues to be backed by
+ // whatever Mappables were in use, and pages in the new address space
+ // are backed by anonymous memory when a page in either is written to.
+ // If old_space_is_anon is true, though, this is reversed, which is useful
+ // when versioning a file to make the new version the one that gets
+ // stored to disk.
+ //
+ // NOTE(review): the text above refers to "old_space_is_anon", but the
+ // parameter below is named "clone_is_real" — confirm the intended
+ // name and sense of this flag.
+
+ clone(AddrSpace addrspace out, bool clone_is_real);
+
+ // Create an anonymous RAM mapping.
+
+ alloc_and_map(ulong len, ulong vstart inout,
+ AllocFlags aflags, MapFlags mflags);
+
+ // Mappable must be implemented by the local kernel, and must hold
+ // read/write/exec permissions appropriate for the MapFlags given.
+
+ map(Mappable ma, Region region, ulong vstart inout, MapFlags flags);
+ unmap(Region region);
+
+ // Set the flags on all pages in the region. CopyOnWrite can be
+ // set, but not cleared, using this method. Fixed is ignored.
+
+ set_mapflags(Region region, MapFlags flags);
+
+ // Returns the flags on the given region, if all pages have the
+ // same flags (except for CopyOnWrite, which is not returned by
+ // this method, as it can be asynchronously cleared).
+
+ get_mapflags(Region region, MapFlags flags out, bool all_same out);
+
+ // Returns the Mappable that covers the specified range, and the offset
+ // into the Mappable that corresponds to the first page in the region.
+ // If any pages within the range are not mapped, or if more than one
+ // Mappable is mapped within the region, or if the offsets into the
+ // Mappable are not contiguous, then NULL is returned in ma.
+ //
+ // This is used to implement mremap().
+
+ get_mapping(Region region, Mappable ma out, ulong offset out);
+
+ // Returns the minimum page size (and thus mapping size/alignment)
+ // supported in this address space. An attempt to create a mapping
+ // that violates this will result in an InvalidArgument exception.
+
+ get_page_size(uint page_size out);
+
+ // Returns the minimum alignment supported for mapping requests;
+ // (vstart % min_align) must equal (region.start % min_align). This
+ // is at least the minimum page size, but may be more on certain
+ // hardware, such as virtually-indexed-physically-tagged caches, where
+ // larger alignment is needed to ensure the absence of cache aliases.
+ // An attempt to create a mapping that violates this will result in an
+ // InvalidArgument exception.
+
+ get_min_align(uint min_align out);
+};
+
+// A handle to an allocation made through Allocator.alloc().
+interface AllocHandle {
+ guid: "CB029266-EAB6-11D9-BCA0-000A95BB581A";
+
+ // Return the region(s) covered by this allocation.
+ get_regions(Region[] regions out);
+
+ // Free a portion of an allocation. To free all of an allocation,
+ // simply release all references to the handle. Each region shall
+ // start and end on block size boundaries. Throws OperationNotSupported
+ // if the allocator does not support partial frees.
+
+ free(Region[] regions);
+};
+
+interface Allocator {
+ guid: "CCF5D83C-EAB6-11D9-8BB7-000A95BB581A";
+
+ // All-bits-set sentinel meaning "no particular start address".
+ const ulong unspecified_start = 0xffffffffffffffff;
+
+ bitfield AllocFlags {
+ // If set, fail if the exact starting address is unavailable (or if
+ // the allocator does not support caller-supplied starting
+ // addresses).
+ //
+ // Otherwise, if the supplied starting address is unavailable, the
+ // allocator allocates a free region and returns it in "start". If
+ // this behavior is explicitly desired (as it usually is), "start"
+ // should be set to unspecified_start, which is always invalid.
+ // Using unspecified_start may be faster than specifying other
+ // invalid addresses (as the allocator can check for this value
+ // rather than checking address availability), and zero should not
+ // be used for this purpose as it may be a valid address.
+
+ Fixed
+ };
+
+ alloc(ulong start inout, ulong len, AllocFlags flags,
+ AllocHandle handle out);
+};
+
+// An Allocator whose block size and backing regions are supplied
+// at run time through this interface.
+interface GenericAllocator : Allocator {
+ guid: "D033DD3A-EAB6-11D9-96E4-000A95BB581A";
+
+ // Set the minimal block size used by the allocator.
+ // This can only be done before any alloc() or
+ // add_regions() calls; an InvalidState exception may
+ // be thrown otherwise.
+
+ set_block_size(ulong block_size);
+
+ // Make one or more regions available for allocations.
+ // The regions may not overlap each other or any existing
+ // regions (whether or not allocated). Regions may not
+ // be removed once added using this interface; allocators
+ // may optionally provide a mechanism for doing so.
+
+ add_regions(Region[] regions);
+};
--- /dev/null
+namespace Events;
+using Notifiers.*;
+
+// A plain Event can be passed to the event source
+// if no action should be taken.
+
+struct Event virtual {
+ guid: "24585852-2877-11DA-9148-00112431A05E";
+};
+
+// Event that targets an asynchronous Notifier (see Notifiers.Notifier),
+// passing it "info".
+struct NotifierEvent : Event {
+ guid: "2FDEB883-2877-11DA-BBC9-00112431A05E";
+
+ Notifier target;
+ VStruct info;
+};
+
+// Event that targets a synchronous SyncNotifier, passing it "info".
+struct SyncNotifierEvent : Event {
+ guid: "D1F6CC34-2877-11DA-9481-00112431A05E";
+
+ SyncNotifier target;
+ VStruct info;
+};
+
+struct EventInfo virtual {
+ guid: "58FB107A-3693-11DA-B25C-000A95BB581A";
+
+ VStruct dynamic_info;
+};
+
+// Event that targets a Traps.Trappable with a Traps.Trap.
+struct TrapEvent : Event {
+ guid: "D4DB748C-2877-11DA-8846-00112431A05E";
+
+ Traps.Trappable target;
+ Traps.Trap trap;
+};
+
+// If a trap event has a trap of this base type,
+// dynamic_info will be filled in with the info passed
+// to the trigger. Otherwise, the dynamic info
+// will be dropped.
+
+struct EventTrap : Traps.Trap {
+ guid: "F884DC14-36D6-11DA-BAEE-000A95BB581A";
+
+ VStruct dynamic_info;
+};
+
+// Obtain notifier objects that act as triggers for the given event.
+interface EventDispatcher {
+ guid: "8598ADAE-35E9-11DA-A310-000A95BB581A";
+
+ setup_trigger(Event event, Notifier trigger out);
+ setup_sync_trigger(Event event, SyncNotifier trigger out);
+};
--- /dev/null
+// System exceptions
+//
+// These are all of the exceptions that derive from SystemException,
+// which can implicitly be thrown from any method.
+//
+// Common fields:
+// exp: explanation string, may be NULL
+// param: index (starting from 0) of the parameter which is causing
+// the problem, or -1 if unspecified. This should only be
+// specified for parameters of IDL methods; if thrown from a
+// language function/method which does not know the relevant
+// parameter in the IDL method, it should use an explanation
+// string instead (and not use the index of its own parameter).
+
+namespace Exceptions;
+
+// The base Exception struct; all exceptions derive from this.
+struct Exception virtual {
+ guid: "D88F50D2-2877-11DA-84AD-00112431A05E";
+
+ // The previous exception frame (i.e. that which threw this one),
+ // or null for the original exception frame. Linkage shall be
+ // preserved whenever an exception is either propagated or
+ // raised within an exception handling block.
+
+ Exception prev;
+
+ // Object and method of the thrower of this frame, if it was rethrown
+ // due to crossing an IDL method boundary. Otherwise, both fields are
+ // null.
+
+ Object object;
+// Method method; // Fix throw_idl when this is uncommented.
+
+ // Language and machine specific information about whence an exception
+ // was thrown.
+
+ ExceptionOriginInfo origin;
+};
+
+// Base type for the language/machine specific origin information
+// carried in Exception.origin.
+struct ExceptionOriginInfo virtual {
+ guid: "DBCF8333-2877-11DA-B66E-00112431A05E";
+};
+
+// This struct, or one derived from it, shall be used for an
+// exception's origin field if it is from native code (and
+// thus can refer to instructions by machine address).
+
+struct NativeCodeExceptionOriginInfo : ExceptionOriginInfo {
+ guid: "E1B3FAC6-2877-11DA-8493-00112431A05E";
+
+ // Address of the faulting or throwing instruction
+ ulong pc;
+};
+
+namespace Std;
+
+// Base type for exceptions that may be implicitly thrown from any
+// method (see the comment at the top of this file).
+struct SystemException : Exception {
+ guid: "E5204D31-2877-11DA-A833-00112431A05E";
+
+ bitfield Flags:32 {
+ // Exception was thrown by the kernel. This bit is stripped when
+ // the exception passes through the ORB. It can be used to verify
+ // that MemoryFault and similar exceptions refer to an actual fault
+ // of the current address space when deciding whether to abort the
+ // process.
+
+ Kernel,
+
+ // Exception was generated automatically by the ORB, rather than
+ // thrown by a method. Some types of exceptions have fixed
+ // explanation string formats (and possibly other field usages) for
+ // ORB generated exceptions, but a free form explanation field for
+ // method-thrown exceptions.
+
+ ORB
+ } flags;
+};
+
+
+// The MemoryFault exception indicates that the throwing method
+// attempted to perform an invalid memory access, and the MemoryFault
+// trap was not handled by the faulting process.
+
+struct MemoryFault : SystemException {
+ guid: "EAA266EC-2877-11DA-9C5D-00112431A05E";
+
+ // Address that the method tried to access
+ ulong addr;
+
+ // Address of the faulting instruction
+ ulong pc;
+
+ // Process of faulting method
+ Proc.Process proc;
+
+ // Architecture specific data
+ ArchMemoryFault arch;
+
+ enum Type {
+ // The fault occurred on a memory load
+ Load,
+
+ // The fault occurred on a memory store
+ Store,
+
+ // The fault occurred on an instruction fetch
+ IFetch
+ } type;
+
+ // NOTE(review): unlike Type above, this enum is not bound to a
+ // field (there is no "} cause;") — confirm whether a "cause"
+ // member was intended.
+ enum Cause {
+ // The faulting address was not mapped
+ Unmapped,
+
+ // The requested operation was prohibited
+ Protected,
+
+ // An I/O error occurred accessing a memory mapped region, or an
+ // uncorrectable memory error was encountered.
+ IOError
+ };
+};
+
+// Base type for the architecture specific data in MemoryFault.arch.
+struct ArchMemoryFault virtual
+{
+ guid: "EF25EED2-2877-11DA-83EF-00112431A05E";
+};
+
+// The OutOfMemory exception indicates that insufficient physical memory
+// is available to perform the requested operation, including implicit
+// allocations of memory such as memory mapped accesses, shared copy on
+// write regions, and zero pages (though implicit allocation failures
+// cannot happen unless overcommit is enabled). This exception is only
+// thrown after a System.Traps.ReduceMemoryUsage trap has been delivered
+// and failed to free sufficient memory.
+
+struct OutOfMemory : SystemException {
+ guid: "F21A198C-2877-11DA-92DB-00112431A05E";
+};
+
+// Like OutOfMemory, but indicates that disk space, virtual
+// address space, etc. has been exhausted.
+
+struct OutOfSpace : SystemException {
+ guid: "6267B5C2-D2E4-11DA-831C-00112431A05E";
+
+ // Free form explanation string (may be NULL).
+ char[] exp immutable;
+};
+
+// Like OutOfSpace, but indicates that a specific requested resource
+// (such as an IRQ number or network port) is already in use. Use
+// OutOfSpace instead for resources that are allocated rather than
+// claimed.
+
+struct ResourceBusy : SystemException {
+ guid: "0EFB88FC-2878-11DA-AEB9-00112431A05E";
+
+ int param; // Index of parameter with problem, or -1 if
+ // unspecified, or if not due to a bad param.
+ char[] exp immutable;
+};
+
+/* The InstrFault exception indicates that the throwing method tried
+ to execute an invalid or privileged instruction, and the
+ InstrFault trap was not handled by the faulting process. */
+
+struct InstrFault : SystemException {
+ guid: "F5CAE5F8-2877-11DA-BB6C-00112431A05E";
+
+ // Address of the faulting instruction
+ ulong pc;
+
+ // Process of faulting method
+ Proc.Process proc;
+
+ // Architecture specific data
+ ArchInstrFault arch;
+};
+
+// Base type for the architecture specific data in InstrFault.arch.
+struct ArchInstrFault virtual
+{
+ guid: "F9CE2A55-2877-11DA-9C92-00112431A05E";
+};
+
+// NOTE(review): Token has no guid member, unlike every other struct
+// in this file — confirm whether one is required here.
+struct Token {
+};
+
+// The TokenFault exception indicates that the caller does not
+// currently have the necessary token to complete the requested method.
+
+struct TokenFault : SystemException {
+ guid: "011B018D-2878-11DA-9F80-00112431A05E";
+
+ Token token; // Token that is required but not present
+
+ enum Type {
+ Thrown, // The TokenFault was explicitly thrown by a
+ // called method
+ Call, // The token was required for a method call
+ Open, // The token was required to obtain a handle on
+ // the object
+ } type;
+
+ bitfield Flags:32 {
+ restartable:1 // The method may be restarted if the requested
+ // token is acquired. Any TokenFaults with this
+ // flag set are handled in a transparent manner,
+ // by either acquiring the token and retrying or
+ // clearing the bit and propagating. Any method
+ // that throws a restartable TokenFault must
+ // be cleanly restartable with no side effects.
+ };
+
+ // For Call and Open, this is the object associated with the prohibited
+ // operation. Explicitly thrown token faults may optionally use this
+ // field.
+
+ Object object;
+
+ // For Call, this is the method that could not be called. Explicitly
+ // thrown token faults may optionally use this field.
+
+// Method method; // Fix throw_idl when this is uncommented.
+
+ // Free form explanation string (may be NULL).
+ char[] exp immutable;
+};
+
+// InvalidArgument may be thrown when an argument or combination
+// of arguments is invalid.
+
+struct InvalidArgument : SystemException {
+ guid: "04865176-2878-11DA-9F87-00112431A05E";
+
+ int param; // Index of problematic parameter, counting from 0,
+ // or -1 if not specified.
+ char[] exp immutable;
+};
+
+// GeneralFailure, as the name implies, is a general purpose exception
+// to indicate failure conditions not covered by existing exceptions.
+// It is generally preferable to define a specific exception for
+// foreseeable causes of failure.
+
+struct GeneralFailure : SystemException {
+ guid: "094C37A1-2878-11DA-914D-00112431A05E";
+
+ char[] exp immutable;
+};
+
+// InvalidState is thrown when a method is called on an object which
+// is not in a state in which the operation is meaningful.
+
+struct InvalidState : SystemException {
+ guid: "0C361FFF-2878-11DA-ABC1-00112431A05E";
+
+ char[] exp immutable;
+};
+
+// Thrown when an object does not implement the requested operation.
+struct OperationNotSupported : SystemException {
+ guid: "119C274E-2878-11DA-B3B2-00112431A05E";
+
+ char[] exp immutable;
+};
+
+// InvalidReference is thrown when a method is called on a reference
+// that no longer points to a valid object, or if such a dangling
+// reference is found in the method's parameters.
+
+struct InvalidReference : SystemException {
+ guid: "16E5E64F-2878-11DA-916E-00112431A05E";
+
+ int param; // Index of parameter with problem, or -1 if the method
+ // was invoked on a dangling reference. If the invalid
+ // reference is in the object's internal state, then
+ // InvalidState should be thrown instead.
+
+ char[] exp immutable;
+ // When thrown by the ORB, NULL if param is -1 or if the
+ // dangling reference was passed directly as a parameter.
+ // If, however, the dangling reference was in a struct,
+ // this provides the path to the bad parameter (e.g. if
+ // the third parameter of a method is a struct with a
+ // member "foo", and "foo" is a struct with a member
+ // "bar", and bar is a dangling object reference, exp will
+ // contain "foo.bar").
+ //
+ // When not thrown by the ORB, this is a free form
+ // explanation field (and may be NULL).
+};
--- /dev/null
+namespace IO;
+using Notifiers.Notifier;
+
+// Result information for an I/O operation; presumably delivered to
+// the Notifier supplied to the *_async methods below — verify
+// against the implementation.
+struct IONotifierInfo virtual {
+ guid: "E38D4FC2-36C7-11DA-B2D1-000A95BB581A";
+
+ // Number of bytes transferred.
+ ulong len;
+ enum Result {
+ Success,
+ NoMoreData,
+ NoMoreSpace,
+ IOError,
+ BadHandle,
+ NoEndpoint
+ } result;
+};
+
+// A readable byte stream.
+interface IStream {
+ guid: "6A2E42D6-EAB6-11D9-AD6A-000A95BB581A";
+
+ read(octet[] buf out, ulong len inout);
+ read_async(octet[] buf shared, ulong len, Notifier notifier) async;
+};
+
+// A writable byte stream.
+interface OStream {
+ guid: "76004CA4-EAB6-11D9-A808-000A95BB581A";
+
+ write(octet[] buf, ulong len inout);
+ write_async(octet[] buf, ulong len, Notifier notifier) async;
+};
+
+// A stream that is both readable and writable.
+interface IOStream : IStream, OStream {
+ guid: "76621658-EAB6-11D9-8884-000A95BB581A";
+};
+
+interface File {
+ guid: "76C2638F-EAB6-11D9-BE5C-000A95BB581A";
+
+ size(ulong size out);
+};
+
+// A File supporting positioned reads.
+interface ReadableFile : File {
+ guid: "4B46E7A0-E66B-49F5-BB8A-D63833A4D79A";
+
+ read(ulong pos, octet[] buf out, ulong len inout);
+ read_async(ulong pos, octet[] buf shared,
+ ulong len, Notifier notifier) async;
+};
+
+// A File supporting positioned writes.
+interface WriteableFile : File {
+ guid: "310C44B2-D5F8-4439-A7DB-0BDBFD0C306C";
+
+ write(ulong pos, octet[] buf, ulong len inout);
+ write_async(ulong pos, octet[] buf,
+ ulong len, Notifier notifier) async;
+};
+
+interface ReadWriteFile : ReadableFile, WriteableFile {
+ guid: "61E259EF-A929-449C-A8B8-1870A744F160";
+};
+
+// Something that is backed by a (replaceable) File.
+interface HasFile {
+ guid: "540020D6-23AC-4061-9CD7-EEC4118BBAAC";
+
+ set_file(File f);
+ get_file(File f out);
+};
+
+interface Seekable {
+ guid: "772A2170-EAB6-11D9-BAA4-000A95BB581A";
+
+ enum SeekType {
+ FromBeginning,
+ FromCurrent,
+ FromEnd
+ };
+
+ // If the backing file is changed, the position resets to the
+ // beginning.
+ //
+ // The offset can be considered unsigned if access to the full 2^64
+ // bytes of the underlying file is required. Overflow and underflow
+ // cause wraparound.
+
+ seek(long offset, SeekType type);
+ get_pos(ulong offset out);
+ size(ulong size out);
+};
--- /dev/null
+namespace IO.Bus;
+
+// A region of a ResourceSpace, with a human-readable description.
+struct Resource {
+ Mem.Region region;
+ ResourceSpace rspace;
+ char[] description;
+};
+
+// Whether the alloc() and/or dev_alloc() methods are accepted by a given
+// ResourceSpace is defined by the particular subtype.
+
+interface ResourceSpace : Mem.Allocator {
+ guid: "2515CEC8-F7E0-11D9-9DF4-000A95BB581A";
+
+ // Like Mem.Allocator.alloc(), but associates the allocation with
+ // the given Device.
+ dev_alloc(ulong start inout, ulong len, Mem.Allocator.AllocFlags flags,
+ Device dev, Mem.AllocHandle handle out);
+
+ get_resources(Resource[] res out);
+ get_parent(Resource parent out);
+ get_bus(Bus bus out);
+ get_devices(Device[] devs out);
+ get_description(char[] desc out);
+};
+
+interface Device {
+ guid: "1F19CCF2-F7E0-11D9-98CE-000A95BB581A";
+
+ get_resources(Resource[] res out);
+ get_parent(Bus bus out);
+ get_description(char[] desc out);
+};
+
+interface Bus : Device {
+ guid: "196CC482-F7E0-11D9-9AED-000A95BB581A";
+
+ get_devices(Device[] devs out);
+ get_resource_spaces(ResourceSpace[] resource_spaces out);
+
+ // Try to auto-enumerate new devices. If the bus is not capable
+ // of auto-enumeration, do nothing. It is up to the specific bus
+ // type as to whether any action is taken for previously-enumerated
+ // devices that are no longer detected.
+
+ scan();
+};
--- /dev/null
+namespace IO.Bus.PCI;
+
+// A PCI bus; maps a device to its interrupt line and controller.
+interface PCIBus : Bus {
+ guid: "5BB5C0EC-F81D-11D9-BAD0-000A95BB581A";
+ get_interrupt_for_device(PCIDevice dev, uint irqnum out,
+ Interrupts.InterruptController con out);
+};
+
+interface PCIDevice : Device {
+ guid: "5E2A66A0-F81D-11D9-9BFC-000A95BB581A";
+};
+
+// A PCI bus at the root of the hierarchy (not itself a PCI device).
+interface HostBridge : PCIBus {
+ guid: "60649040-F81D-11D9-A520-000A95BB581A";
+};
+
+// A PCI-to-PCI bridge: both a PCI bus and a device on its parent bus.
+interface SubBridge : PCIBus, PCIDevice {
+ guid: "62EF072A-F81D-11D9-862B-000A95BB581A";
+};
--- /dev/null
+namespace IO.Console;
+
+// A console is simply a bidirectional I/O stream; it adds no
+// methods of its own.
+interface Console : System.IO.IOStream {
+ guid: "889C9109-EA8B-11D9-9153-000A95BB581A";
+};
--- /dev/null
+namespace IO.Interrupts;
+
+// alloc() and dev_alloc() will throw OperationNotSupported.
+interface InterruptController : Bus.ResourceSpace {
+ guid: "5B67CF12-F7E0-11D9-8D57-000A95BB581A";
+
+ bitfield UserIntFlags {
+ // No flags are currently defined.
+ };
+
+ // Register a userspace callback for the given IRQ number on
+ // behalf of the given device.
+ request_userint(uint irqnum, UserInterruptCallback callback,
+ UserIntFlags flags, Bus.Device device,
+ UserInterrupt handle out);
+};
+
+interface Interrupt {
+ guid: "E7E400CC-F820-11D9-908F-000A95BB581A";
+
+ get_device(Bus.Device dev out);
+ get_controller(InterruptController con out);
+ get_num(uint irqnum out);
+};
+
+// Handle returned by request_userint(); also a Mem.AllocHandle.
+interface UserInterrupt : Interrupt, Mem.AllocHandle {
+ guid: "29939E02-F7E0-11D9-B720-000A95BB581A";
+};
+
+// Callback invoked for a user interrupt (see request_userint).
+interface UserInterruptCallback {
+ guid: "5017C289-F821-11D9-8B10-000A95BB581A";
+ action(UserInterrupt irq);
+};
--- /dev/null
+// Hierarchical name-to-object lookup.
+interface Namespace {
+ guid: "4C81F0BD-EAB6-11D9-86E1-000A95BB581A";
+
+ // This doesn't work with the current idlc.
+// typedef char[] ns_component;
+
+ // FIXME: What about namespace members that are not objects,
+ // but rather arrays, structs, and such?
+
+ // Look up "name", split into components at "delimiter".
+ lookup_delimit(char[] name immutable, char delimiter, Object obj out);
+// lookup_nodelimit(ns_component[] name, Object obj out);
+};
--- /dev/null
+namespace Notifiers;
+
+// Asynchronous notification target; notify() does not wait for
+// the recipient (it is declared async).
+interface Notifier {
+ guid: "AF8F1B5C-EAB6-11D9-98AF-000A95BB581A";
+
+ notify(VStruct info) async;
+};
+
+// Synchronous notification target; notify() returns only after the
+// recipient has processed the notification.
+interface SyncNotifier {
+ guid: "E1AA43FC-2400-11DA-A460-00112431A05E";
+
+ notify(VStruct info);
+};
+
--- /dev/null
+/* The Object Manager
+ *
+ * This is the kernel interface through which objects, classes, and
+ * interfaces are opened, created and destroyed. The instantiated objmgr
+ * object is always id 1.
+ */
+
+// Empty base interface type.
+interface Object {
+ guid: "C227980E-EA8B-11D9-84F1-000A95BB581A";
+};
+
+// Empty virtual (extensible) struct base type.
+struct VStruct virtual {
+ guid: "E2E3AF5F-2858-11DA-9FBB-00112431A05E";
+};
+
+namespace Objects {
+ // FIXME: make this a built-in ID type that fixes things
+ // up when moving between systems. It can be native word
+ // sized, rather than always long.
+
+ typedef ulong ID;
+
+ // Run-time description of a (possibly array) type.
+ struct TypeDesc {
+ enum BasicType {
+ Bool, Short, Int, Long,
+ UShort, UInt, ULong,
+ FShort, FLong, Octet, Enum,
+ Bitfield, Interface, Struct
+ } type;
+
+ ID id; // ID of interface, struct, bitfield, or enum (or 0).
+ ulong length; // Type length: bits in bitfield, bytes in
+ // struct, max value of enum, or 0.
+
+ struct Array {
+ // Inclusive lower and upper bounds of the array. If the
+ // array has no upper bound, upper should be -1. If the
+ // array has no lower bound, lower should be 0. If this is
+ // not an array, both should be 0.
+
+ long lower, upper;
+ } array;
+ };
+
+ // Run-time description of one method parameter.
+ struct ParamDesc {
+ TypeDesc type;
+
+ enum Dir {
+ In, Out, InOut
+ } dir;
+
+ bitfield Attr:32 {
+ copy:1, /* If input argument references memory, a
+ copy will be made (possibly via copy-on-write)
+ before the method receives control.
+ If the parameter is inout, the changes (if any)
+ will be copied to the original memory area (or
+ the copy-on-write mapping will occur on return
+ from the method). This is an implementation
+ attribute and is incompatible with shared. */
+
+ shared:1, /* If input argument references memory, the
+ method may continue to access the memory
+ until it explicitly releases it. The memory
+ region must remain valid until then, and all
+ changes to the memory area will be shared by
+ both the caller and the callee. This is an
+ interface attribute and is incompatible with
+ copy. The argument should be page aligned
+ and its size must be a multiple of the page
+ size; otherwise, the caller will have
+ access to data outside the region passed. */
+ } attr;
+ };
+
+ // Run-time description of one method.
+ struct MethodDesc {
+ char[] name;
+ ulong entry; /* Address of method entry point */
+ ParamDesc[] params;
+
+ bitfield Flags:32 {
+ async:1, /* Method is invoked indirectly via message passing,
+ and the caller does not wait for completion. No
+ out parameters may be used in such a method. */
+
+ ordered:1, /* Method requires strong ordering; all ordered methods
+ from the same thread will be delivered in order, and
+ the next such method will not be delivered until
+ the recipient of the previous invocation message
+ acknowledges it. This flag is only valid on async
+ methods, as synchronous methods are inherently
+ ordered.
+
+ An alternative is to invoke methods through
+ a serialization object, which allows finer
+ control over the level of serialization.
+ This flag may go away in favor of always
+ using explicit serialization objects when
+ needed. */
+ } flags;
+ };
+
+ // Run-time description of a class.
+ struct ClassDesc {
+ ulong[] super; /* superinterface(es) */
+ MethodDesc[] methods;
+ ulong instantiate, subinterface, remove; /* access tokens */
+ };
+
+ // A named datum (used for struct members).
+ struct DatumDesc {
+ char[] name;
+ TypeDesc type;
+ };
+
+ // Run-time description of a struct.
+ struct StructDesc {
+ DatumDesc[] data;
+ };
+
+ interface Class {
+ guid: "C30D0A85-EA8B-11D9-B985-000A95BB581A";
+ query_interface(Interface iface, bool supported out);
+ instantiate(Object instance out);
+ };
+
+ interface Interface {
+ guid: "C36EBE18-EA8B-11D9-896D-000A95BB581A";
+ };
+
+ interface Method {
+ guid: "C3D1BA69-EA8B-11D9-9439-000A95BB581A";
+ };
+
+ interface Struct {
+ guid: "C4384909-EA8B-11D9-B856-000A95BB581A";
+ };
+
+ interface Filter {
+ guid: "C4A89048-EA8B-11D9-BB2C-000A95BB581A";
+
+ // Returns the object from which the filter was created.
+ // This will only succeed if the calling process already
+ // has a reference to the real object. If the filter
+ // points to another filter, it will return the transitive
+ // real object if possible, or else the filter closest to
+ // the real object for which the process already has a
+ // reference. If neither the real object nor a closer
+ // filter can be returned, this filter itself is returned.
+ // This should not be used for comparing filter references,
+ // as separately created filters for the same object will
+ // have different IDs and pointers, even if they contain
+ // the same subset of interfaces.
+
+ get_real_obj(Object obj out);
+
+ // Returns a local ID of the real object, regardless of whether
+ // the calling process has a reference to it, or whether there
+ // are other intervening filters. The ID cannot be used to
+ // invoke methods, but it can be used to compare the identities
+ // of the objects behind different filter objects. If a real
+ // reference to the object is later obtained, it will have
+ // the same local ID.
+
+ get_real_obj_id(ID id out);
+ };
+
+ interface ObjectManager {
+ guid: "C28596AB-EA8B-11D9-8DEB-000A95BB581A";
+
+ new_object(ID cla, ID obj out);
+ delete_object(ID obj) async;
+
+ new_interface(ID cd, ID cla out);
+ delete_interface(ID cla, bool call_del) async;
+
+ open_object(ID obj, uint handle out);
+ close_object(uint handle) async;
+
+ // Create a filter object that implements only some of the
+ // interfaces implemented by "obj". This is useful to create a
+ // more limited reference to pass to less trusted processes. If
+ // "exclude" is true, then all interfaces but those specified will
+ // be included. Otherwise, only those interfaces specified will be
+ // included. A filter with no interfaces may be created to act as
+ // a (mostly) opaque handle.
+ //
+ // A holder of a filter reference can convert it into the real
+ // object if it already has (or later obtains) a reference to
+ // the real object. It can also compare the identities of
+ // separately created filters pointing at the same object
+ // regardless of what it has a real reference to. Thus, filters
+ // should be used only to limit access granted by passing a
+ // reference to another process; it should not be used to hide the
+ // identity of the real object.
+
+ create_filter(Object obj, bool exclude, Interface[] ifaces,
+ Filter filter out);
+ };
+
+ // This is a generic Factory interface; specific factories may
+ // (but do not need to) implement a more specific interface that
+ // guarantees that the generated object will comply with some
+ // particular interface.
+
+ interface Factory {
+ guid: "9C084DD0-5D69-11DA-BD5A-000A95BB581A";
+ create(Object obj out);
+ };
+}
--- /dev/null
+// Process
+//
+// These are the interfaces through which operations are performed on
+// processes.
+
+namespace Proc;
+
+interface Process {
+ guid: "9C751874-EAB6-11D9-8399-000A95BB581A";
+};
+
+// This interface is implemented by the Process class.
+interface ThreadFactory {
+ guid: "9FDA3678-EAB6-11D9-97D1-000A95BB581A";
+
+ // Create a new thread in this process. If pc is 0, then the new
+ // thread begins execution immediately after the call to new_thread.
+ // Otherwise, the new thread begins execution at the address passed in
+ // pc. If stack is 0, then the new thread begins with the same stack
+ // pointer as the caller. Otherwise, the new thread begins with the
+ // stack pointer specified in stack. Various flags can be specified
+ // by the NewThreadFlags bitmask; see the definition above for
+ // details.
+ //
+ // NOTE(review): the comment above mentions a NewThreadFlags
+ // bitmask, but no flags parameter appears in either signature
+ // below and no such bitfield is visible in this file — confirm.
+ //
+ // Upon return to the caller, the newly created thread is placed in
+ // thread. If pc is 0, the newly created thread will begin at the same
+ // address that the creator thread returns to, returning NULL in thread.
+
+ new_thread(ulong pc, ulong stack, ulong param, Thread thread out);
+
+ // This method is identical to new_thread except that it is asynchronous,
+ // and thus cannot return thread, and cannot take a value of 0 for
+ // pc or stack.
+
+ new_thread_async(ulong pc, ulong stack, ulong param) async;
+};
+
+// This interface provides control of the binding between Processes
+// and AddrSpaces. It is implemented by the Process class.
+
+
+interface ProcAddrSpace {
+ guid: "A3494EFC-EAB6-11D9-955B-000A95BB581A";
+
+ // Return the AddrSpace in which the process executes
+ get_addr_space(Mem.AddrSpace addr_space out);
+
+ // Attach the process to an AddrSpace. If the process is currently
+ // attached to an AddrSpace, that address space is first detached.
+
+ set_addr_space(Mem.AddrSpace addr_space);
+
+ // This is a hack to implement Unix-like exec() efficiently.
+ // It atomically sets the address space and jumps to the address
+ // given. If the current thread is not in this process, an
+ // InvalidState exception is thrown.
+
+ set_addr_space_and_jump(Mem.AddrSpace addr_space, ulong pc);
+};
+
+// Capability-token management for a process.
+// This interface is implemented by the Process class.
+interface ProcTokens {
+ guid: "A616D69E-EAB6-11D9-967E-000A95BB581A";
+
+ // Sets has_token to true if the process currently has the
+ // specified token, or false if it does not.
+
+ has_token(ulong token, bool has_token out);
+
+ // Grants the specified token to the process. Throws a restartable
+ // TokenFault if the process does not have grant rights to the token.
+
+ grant_token(ulong token);
+};
+
+// Access to a process's trap-delivery objects (see the Traps
+// namespace). This interface is implemented by the Process class.
+interface ProcTraps {
+ using Traps.Trappable;
+ guid: "A88F3742-EAB6-11D9-B772-000A95BB581A";
+
+ // Returns a reference to the Trappable object for this process.
+ // Traps sent to this object will be sent to all threads in
+ // the process.
+
+ get_trappable(Trappable trappable out);
+
+ // Returns a reference to the TrapDistributor object for
+ // this process. Traps sent to the distributor object will
+ // be sent to one arbitrary thread in the process, rather than
+ // all threads which are not ignoring the trap. A trap will
+ // only be sent to a thread that masks the trap if all threads
+ // mask the trap; likewise, a trap will only be ignored if
+ // all threads ignore it (or if the process itself does).
+
+ get_trap_distributor(Trappable trap_distributor out);
+};
--- /dev/null
+using System.*;
+
+namespace Proc;
+
+// A kernel thread. This interface declares no methods of its own;
+// thread-control operations are provided by the separate ThreadPriv
+// and ThreadSched interfaces below.
+interface Thread {
+ guid: "AE4A734E-EA8B-11D9-8142-000A95BB581A";
+};
+
+// Privileged thread operations (blocking, and mapping back to the
+// unprivileged Thread reference).
+interface ThreadPriv {
+ guid: "A592FD94-5945-11DA-B99F-000A95BB581A";
+
+ // Return the unprivileged Thread reference for this thread.
+ get_thread(Thread unpriv out);
+
+ // Block the thread until the blocker is unblocked.
+ //
+ // Asynchronous blocking (i.e. calling this method from a different
+ // thread) is not currently allowed. It may be later, using a
+ // separate blocker field in the kernel. This would be mainly for
+ // supporting debuggers, and would not take effect until the thread
+ // returns to userspace (if currently blocked or executing kernel
+ // code).
+
+ block(Blocker blocker);
+
+ // Like block, but with multiple blockers. The thread wakes when any
+ // of the blockers have been unblocked.
+
+ block_multi(Blocker[] blocker);
+};
+
+// Scheduling control for a thread.
+interface ThreadSched {
+ guid: "AEB5CD7C-EA8B-11D9-BE15-000A95BB581A";
+
+ // Set the thread's scheduling policy and parameters. Throws
+ // InvalidArgument if sched is not a scheduler provided by the
+ // local kernel, or if param is not of the subtype required by
+ // sched.
+
+ set_sched(Scheduler sched, SchedParams param);
+};
+
+// Base type for scheduler-specific parameters; ThreadSched.set_sched
+// requires the subtype appropriate to the chosen Scheduler.
+struct SchedParams virtual {
+ guid: "1C406B42-2878-11DA-93B8-00112431A05E";
+};
+
+// A scheduling policy, as accepted by ThreadSched.set_sched.
+// Declares no methods of its own.
+interface Scheduler {
+ guid: "AF383690-EA8B-11D9-95D8-000A95BB581A";
+};
+
+// An object a thread can wait on via ThreadPriv.block/block_multi.
+// Declares no methods of its own.
+interface Blocker {
+ guid: "30D19964-5945-11DA-BC3B-000A95BB581A";
+};
--- /dev/null
+namespace Time;
+
+// A point in time (or a signed interval), expressed as seconds plus
+// nanoseconds.
+struct Time inline {
+ long seconds; // 0 is Jan 1, 1970 at midnight UTC; negative values
+ // can be used for historical dates or intervals
+ uint nanos; // Values greater than 999,999,999 are errors and
+ // should cause an exception to be thrown.
+};
+
+// An interval-timer setting: an expiry time plus a re-arm interval
+// (see ITimer.arm for how the interval is used).
+struct ITime inline {
+ Time value; // expiry time
+ Time interval; // re-arm interval; zero means one-shot
+};
+
+// A Clock will generally also implement TimerFactory and ITimerFactory,
+// producing timers that are tied to the clock in question. The kernel
+// clock objects do not implement ITimerFactory.
+
+interface Clock {
+ guid: "C7F53E18-FD5C-11D9-B176-000A95BB581A";
+
+ // Get the current time according to this clock.
+
+ get_time(Time time out);
+
+ // Get the clock's resolution. This is the largest
+ // amount by which the clock may quantize its timekeeping.
+
+ get_resolution(Time res out);
+
+};
+
+// FIXME: Implement generic Factory
+// Creates one-shot Timer objects (see the Clock comment above for
+// how timers relate to clocks).
+interface TimerFactory {
+ guid: "94BB5C90-41B6-11DA-8815-000A95BB581A";
+ // Create a new Timer.
+ new_timer(Timer timer out);
+};
+
+// Creates interval-timer (ITimer) objects.
+interface ITimerFactory {
+ guid: "97C7A4DA-41B6-11DA-8615-000A95BB581A";
+ // Create a new ITimer.
+ new_itimer(ITimer itimer out);
+};
+
+// A clock whose current time can be changed.
+interface SettableClock {
+ guid: "CED0AD0B-FD5C-11D9-954F-000A95BB581A";
+
+ // Set the clock's time. This may be rounded off to no coarser
+ // a precision than the clock's resolution.
+
+ set_time(Time time);
+
+ // Return a reference to the read-only Clock interface associated
+ // with this object.
+
+ get_readonly_clock(Clock clock out);
+};
+
+// Flags accepted by ITimer.arm/get_expiry.
+bitfield TimerFlags {
+ // If set, set_time and get_time use Times relative to the current
+ // time, rather than absolute time.
+
+ RelTime,
+};
+
+// A one-shot timer. Unlike ITimer, there is no re-arm interval.
+interface Timer {
+ guid: "F78C769F-146E-11DA-897A-000A95BB581A";
+
+ // Set/get the time at which this timer expires.
+
+ arm(Time expiry);
+ get_expiry(Time expiry out, bool was_armed out);
+
+ // Stop the timer from firing. This may be called regardless of
+ // whether the timer is currently armed. The timer is guaranteed to
+ // not fire after the call completes, but may fire at any point during
+ // the disarm() call.
+ //
+ // If the firing event is synchronous, it will have completed by the
+ // time the disarm() call completes. Otherwise, though the final
+ // firing has happened, the handler may still be running after
+ // disarm() returns.
+
+ disarm();
+
+ // Set the type of action to take when the timer fires.
+
+ set_action(Events.Event event);
+};
+
+// An interval timer: like Timer, but with automatic periodic
+// re-arming and overrun accounting.
+interface ITimer {
+ guid: "D19B0A25-FD5C-11D9-9A2F-000A95BB581A";
+
+ // Arm the timer with the specified expiry. If expiry.interval is not
+ // zero, then when the timer expires, it will automatically be
+ // re-armed with the time set to previous expiry plus the interval.
+ // The previous expiry is atomically read and returned in oldexpiry.
+ //
+ // Nothing will happen on timer expiration unless an action has been
+ // specified with set_action.
+
+ arm(ITime expiry, TimerFlags flags, ITime oldexpiry out);
+
+ // Return the expiry as with arm, but without setting a new
+ // expiry.
+
+ get_expiry(TimerFlags flags, ITime expiry out, bool was_armed out);
+
+ // Disarm the timer.
+
+ disarm();
+
+ // Return the number of times since the last call to get_overrun
+ // that a timer has fired without the previous timer action having
+ // completed.
+
+ // For synchronous events such as a TrapEvent, this
+ // includes the user trap handler not finishing on time (and in
+ // such a case, new traps will not be generated for overrun firings).
+ //
+ // For asynchronous events such as a NotifierEvent, this only includes
+ // the system's ability to send the notification. If the user notify
+ // method can't keep up, it will be reflected in a growing queue of
+ // pending messages, not in the overrun count.
+
+ get_overrun(uint overruns out);
+
+ // Set the notifier to be used when the timer expires. For kernel
+ // timers, only sync instances of the kernel event dispatcher may be
+ // used (the sync notifier may be a trigger for an async
+ // NotifierEvent, though).
+
+ set_notifier(Notifiers.Notifier notifier);
+ set_sync_notifier(Notifiers.SyncNotifier notifier);
+};
--- /dev/null
+/* System traps
+ *
+ * Traps are events delivered to a process or thread, similar to
+ * POSIX signals.
+ */
+
+namespace Traps;
+
+// The base Trap struct; all traps must derive from this.
+struct Trap virtual {
+ guid: "2B91FBD9-2878-11DA-98E8-00112431A05E";
+};
+
+// The kernel Process and Thread classes implement this interface.
+// When trap is invoked on a process, a trap is sent to all of the
+// threads in the process. When invoked on a thread, a trap is sent
+// only to that thread. A trap can be sent to one arbitrary thread in
+// a Process by getting a reference to a TrapDistributor class object
+// for that Process.
+
+interface Trappable {
+ guid: "88661962-EAB6-11D9-9B1D-000A95BB581A";
+
+ // Delivers a synchronous trap according to the rules of the
+ // object on which this method was invoked. "delivered" is false
+ // if the trap was ignored, if this was a process's trappable and
+ // there are no threads in the process, or if an error occurred
+ // that prevented delivery (though that case should generally be
+ // handled with exceptions). It is *not* false merely because the
+ // trap was masked at the time.
+
+ trap(Trap trap, bool delivered out);
+};
+
+// The kernel Process and Thread classes provide objects that
+// implement this interface. It is separate from Trappable for
+// security reasons (plus, the distributor objects don't have their
+// own masks). If a trap is sent to a thread via its own Trappable,
+// then only the thread's mask will be consulted. If it is sent via
+// a process's Trappable, either directly or through the distributor,
+// the trap must pass both process and thread masks to be delivered.
+
+interface TrapMaskTable {
+ guid: "8D09635A-EAB6-11D9-A7EC-000A95BB581A";
+
+ enum MaskState {
+ Unmask, // Accept this trap and its descendants, and
+ // deliver them immediately. This is the
+ // default behavior.
+ Mask, // Accept this trap and its descendants, but don't
+ // deliver them until unmasked.
+ Ignore, // Ignore this trap and its descendants; the trap
+ // is discarded and will not be delivered even if
+ // the trap is later unmasked.
+ DeleteEntry // Undo previous calls to set_mask for this trap,
+ // reverting ignored status to that of the trap's
+ // parent (or Ignore for the root Trap). This is
+ // valid only for set_mask().
+ };
+
+ // One mask-table entry: a trap type and its mask state.
+ struct MaskEntry {
+ System.Objects.Struct trap;
+ MaskState state;
+ };
+
+ // Sets the mask status for the trap in question, as well as its
+ // children. Previous calls to set_mask on a descendant trap
+ // will not be affected unless override is set to true (in which
+ // case it's as if DeleteEntry were called on all descendants).
+
+ set_mask(MaskEntry mask, bool override);
+
+ // This returns the mask state for a given trap type, as well as
+ // the specific trap type whose mask entry was used to determine
+ // this (it will be either the given trap, or the nearest ancestor
+ // with a mask entry).
+
+ get_mask(System.Objects.Struct trap, MaskEntry mask out);
+
+ // Set the entire mask table as an unsorted linear list of entries.
+ // If override is true, this replaces the existing mask table.
+ // Otherwise, these entries are added to the list, overriding
+ // only exact trap type matches.
+
+ set_mask_table(MaskEntry[] entries, bool override);
+
+ // Get the entire mask table as an unsorted linear list of entries.
+ get_mask_table(MaskEntry[] entries out);
+};
+
+namespace Std;
+
+// The ReduceMemoryUsage trap is delivered when the system is low on
+// uncommitted virtual memory. It is delivered to each process using
+// uncommitted memory, in decreasing order of the amount of such memory,
+until enough memory is freed to accommodate the request.
+
+// Trap payload asking the receiving process to free memory; see the
+// delivery-policy comment above.
+struct ReduceMemoryUsage : Trap {
+ guid: "40CBFDB6-2878-11DA-B80D-00112431A05E";
+
+ // Amount of memory in bytes that are needed. A process should
+ // stop looking for memory to free once it has freed this amount
+ // if it would save significant processing time, or if the memory
+ // being freed is still useful as cached data.
+
+ ulong mem_needed;
+};
--- /dev/null
+IDLC does not check whether the contents of a bitfield or enum fit
+within the specified size.
+
+IDLC does not check for loops in the inheritance chain.
--- /dev/null
+Allow "class a.b" as well as/instead of "namespace a class b" in
+type definitions.
+
+Replace use of ordinary "string" with reference-counted "String".
+
+Implement aliases, that can be used to import other namespaces or
+specific elements thereof as if they were defined in the current
+namespace, optionally with a different name. They can also be used
+simply to define an alias within a namespace.
+
+Implement typedefs, which are like aliases, but which are treated as
+distinct types for type-checking purposes. Typedefs should not be
+allowed on namespaces themselves (for now, at least... Maybe when/if
+virtual classes are added that can change).
+
+Finish semantic routines and generate output (mostly done).
+
+Implement the importation of compiled types.
+
+Add implicit System.Object inheritance.
+
+Allow symbols defined in the current and ancestor namespaces to
+override those elsewhere in the search path without ambiguity,
+favoring closer namespaces.
+
+Come up with a proper set of namespace search rules.
+
+Implement loop detection for constant initialization.
+
+C++: Constructors for easy struct initialization
+
+C++: Array<> initializers for constant strings
+
+Implement autorelease pools. Until then, I'm not going to bother
+fixing some memory leaks. Partially done -- the interfaces exist,
+but the implementation doesn't (anything autoreleased will just leak
+for now). Note that before it is implemented, code that assumes a
+newly allocated object has refcount 1 needs to be fixed to
+assume/accept the possibility that it has been autoreleased.
--- /dev/null
+Parameter syntax is ambiguous; does foo(out .bar baz) have an out
+parameter of type .bar, or an in parameter of type out.bar? Perhaps
+the leading "." should be changed to ".." or something...
+ - Leading "." is now "..". This has the side-effect of foo..bar
+ being tokenized as foo ..bar, rather than an error as it was
+ previously.
+ - Attributes now go at the end, rather than the beginning. Perhaps
+ the global prefix can go back to being a single dot, but I'm not
+ going to look into it right now.
+
+Empty bitfields cause parse errors.
+
+Uncaught InvalidArgument exception when namespaces are implicitly
+declared inside a struct (namespace qualification should not be
+allowed).
+
+Anonymous enumerations cause parse errors.
+
+Parameter type names currently cannot be namespace-qualified.
+
+Parameter output does not currently contain type information.
+compiledtypes.h says that .self is CompiledParam and type is
+CompiledAlias, but the code does not generate a directory. Instead
+of doing that, I'll stick a CompiledBasicType and a CompiledAlias at
+the end of CompiledParam.
+
+The current array-of-strings representation is kind of stupid;
+instead of a list of file offsets, switch to counted strings (such as
+is used now in CompiledAlias), and let the reader iterate over them.
+
+Alias should work on more than just types.
+
+A lookup can succeed in pass 1 that would yield a different result in
+pass 2. For instance:
+
+ struct a {
+ };
+
+ struct b {
+ a c;
+ typedef int a;
+ };
+
+Similarly, it's possible for two passes to be insufficient.
+This causes an internal error:
+
+ typedef d b;
+
+ struct d {
+ int f;
+ };
+
+ struct a {
+ b c;
+ };
+
+Alias names get recorded in the type field of Datums (and other
+places) rather than what the alias points to.
+
+Constants are currently not allowed at namespace scope.
+
+Bool constants cannot be initialized.
+
--- /dev/null
+# Makefile for idlcomp, the IDL compiler. Relies on the shared build
+# framework in ../Makefile.head and ../Makefile.tail, which define
+# MKDIR, FLEX, BISON, MV, RM, BUILDDIR, BUILDCXX, BUILDOBJS, etc.
+TOP := $(shell dirname `pwd -P`)
+COMP := idlcomp
+BUILDTYPE := build
+ARCHINDEP := y
+include ../Makefile.head
+
+OPT := -O0
+
+# Ordinary C++ sources, compiled from the source directory.
+RAW_CXXFILES := main namespace types output input util targets
+BUILDCXXFILES += $(RAW_CXXFILES:%=$(DIR)%)
+
+# Generated C++ sources (flex/bison output), compiled from $(BUILDDIR).
+RAW_CXXFILES := idlparse cdlparse scan
+BUILDGENCXXFILES += $(RAW_CXXFILES:%=$(DIR)%)
+
+include languages/c++/Makefile
+
+BUILDDEFS += -D__STDC_FORMAT_MACROS -D__STDC_VERSION__=199901 -I. -DBUILDDIR=$(BUILDDIR)
+
+# Flex's scanner triggers the unused warning.
+BUILDCXXFLAGS += -Wno-unused
+
+TARGETS := $(BUILDDIR)/idlc
+
+# Generate the scanner; use automatic variables so the recipe cannot
+# drift out of sync with the rule header.
+$(BUILDDIR)/scan.cc: scan.lex
+	@echo $(COMP): "$<"
+	@$(MKDIR) $(BUILDDIR)
+	@$(FLEX) -o$@ $<
+
+# Some versions of bison will generate idlparse.cc.h based on an
+# output filename of idlparse.cc, and others will generate
+# idlparse.hh. There's no way I can find to specify which to use, so
+# the name has to be canonicalized after bison is run.
+
+$(BUILDDIR)/idlparse.cc: idlparse.y
+	@echo $(COMP): "$<"
+	@$(MKDIR) $(BUILDDIR)
+	@$(BISON) -b idl -p idl_ --verbose -d $< -o $@
+	@if [ -e $(BUILDDIR)/idlparse.hh ]; then \
+		$(MV) $(BUILDDIR)/idlparse.hh $(BUILDDIR)/idlparse.cc.h; \
+	fi
+
+$(BUILDDIR)/cdlparse.cc: cdlparse.y
+	@echo $(COMP): "$<"
+	@$(MKDIR) $(BUILDDIR)
+	@$(BISON) -b cdl -p cdl_ --verbose -d $< -o $@
+	@if [ -e $(BUILDDIR)/cdlparse.hh ]; then \
+		$(MV) $(BUILDDIR)/cdlparse.hh $(BUILDDIR)/cdlparse.cc.h; \
+	fi
+
+# These dependencies need dummy actions, or make will ignore them for
+# some reason.
+
+$(BUILDDIR)/idlparse.cc.h: $(BUILDDIR)/idlparse.cc
+	@
+
+$(BUILDDIR)/cdlparse.cc.h: $(BUILDDIR)/cdlparse.cc
+	@
+
+# Extra clean-up hook, run via EXTRACLEAN; removes generated parser
+# files and cleans the test directory.
+idlcclean:
+	$(MAKE) -C tests clean
+	$(RM) $(BUILDDIR)/?dlparse.*
+
+EXTRACLEAN += idlcclean
+
+# The scanner includes the parser's token definitions.
+$(BUILDDIR)/scan.o: $(BUILDDIR)/idlparse.cc.h
+
+export IDLC
+# Both are command targets, not files; declare them phony so stray
+# files with these names cannot shadow them.
+.PHONY: tests idlcclean
+tests:
+	$(MAKE) -C tests
+
+# The parsers must be generated before dependency generation runs.
+PREDEP := $(BUILDDIR)/idlparse.cc $(BUILDDIR)/cdlparse.cc
+
+include ../Makefile.tail
+
+$(BUILDDIR)/idlc: $(BUILDOBJS)
+	@echo $(COMP): Linking $(COMP)
+	@$(BUILDCXX) -o $@ $(BUILDOBJS)
--- /dev/null
+5/23/04:
+
+What to do about replacing symbols in later passes? A complete
+replacement will break existing pointers, and a symbolic reference
+would require substantial code changes. I'll probably produce output
+after each pass and wipe the symbol table; this is less efficient,
+but similar to how the code was originally intended to work.
+
+How to determine if a pass 2 is needed if the symbol lookup succeeded
+due to stale external data that would be overwritten? Perhaps
+eliminate the --overwrite option; then it wouldn't be possible,
+but it could be annoying to have to wipe everything before each
+compilation. It would pretty much enforce one batch of source IDLs
+per output directory, though that's not necessarily a bad thing.
+
+In fact, I think I'll just make --overwrite act on the entire output
+directory; if it's not set, you get an error if there's *anything* in
+there.
+
+Now, what happens when you get around to declaring a symbol in a
+later pass, which has been loaded from the fs already? If the only
+thing the linkage is used for is to generate a name in the output,
+then just replace it and let the old version stay refcounted. Is
+there enough in the pass1 output for error checking, other than
+constant ranges? I think so.
+
+When will circular inheritance checks be done? That'll require the
+ability to compare references, meaning we can't just go replacing
+things. Other than that, I think it can be done at the end of pass
+2. So instead of replacing, I'll just add information to existing
+objects (which means I get to go fix all the places where that sort
+of work is done in the constructor).
+
+5/25/04:
+
+In conclusion on the above, no replacement is done for future passes.
+Constructor calls have been replaced with calls to a static declare()
+function, which will either call a constructor or return the existing
+one (or complain if there's a real conflict (i.e. within this pass or
+with an external symbol)), as well as possibly initialize some data.
+
+Still to do:
+
+Implement pass 3
+Finish implementing output (including sanity check for incomplete
+data).
+Nuke --overwrite, and complain if anything is in the target
+directory.
+
+8/1/04:
+
+Most vtable-related design is done. The GUID pointers will have to
+be dynamically generated somehow (either by the dynamic linker or by
+startup code), to make sure the same pointer is used in all
+components.
+
+The compiled type representation will break badly on a
+case-insensitive filesystem. This is already seen in the current IDL
+files. Some sort of alternate mapping will be needed. Also, it
+looks like the performance of output generation will be fairly poor
+under UFS on OS X; even with the current set of IDL it takes 1/4
+second to generate all output. Not that it was expected to perform
+well on legacy filesystems, but still, I wonder how long it will take
+on the full set of IDL...
+
+9/21/04:
+
+Enum and bitfield inheritance may be useful...
+
+9/22/04:
+
+YYError() should probably be turned into UserError()...
+
+9/25/04:
+
+Or more specifically, into something like RecoverableUserError().
+
+12/7/04:
+
+Arrays need some more thought, specifically multi-dimensional
+and inline arrays, and how they interact with typedefs. Currently,
+multi-dimensional arrays are simply not supported, but what happens
+if you typedef an array, and then create an array of that? It's
+accepted at the moment, and if you accept that, why not regular
+multi-dimensional arrays? Plus, with out-of-line arrays
+multi-dimensional arrays cannot be created simply by multiplying the
+sizes of each dimension. Support should be added.
+
+12/21/04:
+
+A separate type of reference will likely be needed for persistent
+entities, as the overhead would be too much to always do it. This
+would also allow an entity to be locked against decaching (but not
+ordinary swap-out) by acquiring a non-persistent reference.
+
+If one is willing to disallow such locking, persistence could simply
+be an attribute of a type, but you'd still have problems with
+references to embedded types; a persistent type should be able to
+contain non-persistent types (either inline or by reference).
+
+One implementation of persistence would be for a persistent reference
+to have two states. An uncached reference consists of a
+non-persistent reference to a storage object (or perhaps a cache
+object backed by a storage object). A cached reference is like a
+normal, non-persistent reference. The state would have to be checked
+on every dereference. If it is found to be uncached, the entity is
+retrieved (either from storage, or from cache (it may have gotten
+there via another reference)), the state of the reference is changed,
+and the reference is added to a list to be swept when trying to
+decache the object. Something would need to be done to prevent races
+with asynchronous decaching (perhaps an in-use bit or refcount in the
+reference). However, implementing such a mechanism would be
+difficult on top of an ordinary language.
+
+An alternative, which is less "automatic" from a programmer's
+perspective, but still much better than the current state of things,
+is to have the programmer always acquire an ordinary reference before
+dereferencing (essentially, the in-use refcount of the previous
+mechanism would be managed manually or by garbage collection). The
+programmer can choose whether to keep the ordinary reference around
+(which favors simplicity, determinism, speed) or the storage
+reference (which minimizes memory consumption and requires more
+programmer and CPU time to acquire a usable reference more often).
+
+The difference between this and simply having serialize/deserialize
+methods is that you would receive the same entity address if you
+convert a storage reference multiple times. This causes a problem if
+you do this from different address spaces, though. Shared memory is
+a possibility, but it would be unsuitable in many circumstances due
+to either races or memory wastage (you'd pretty much need to allocate
+a page per entity, so that access can be controlled precisely (you
+shouldn't be able to access entity B just because some other process
+has it in the same page as entity A to which you do have access
+rights, and you can't move one of them to another page without
+breaking the other process's references)).
+
+12/25/04:
+
+Security issues need some more thought. In particular, how to handle
+the case where the rights of multiple processes are needed to do
+something, with no one process fully trusted with all of those
+rights. If you just pass a handle to one process, and don't have any
+further restrictions, then it can do other things with that handle,
+long after it's returned. Delegates would allow it to be limited to
+one method, and handle revocation would be nice as well. However, it
+could still be more privilege than was intended to be granted.
+
+To be fully secure, one-time-use objects could be created that only
+allow a certain, specific operation, but that would have too much
+overhead in many cases.
+
+12/28/04:
+
+An ordinary reference has certain rights associated with it, and
+these rights are transfered to the callee when the reference is
+passed. For persistent references, only the namespace lookup
+permission is bypassed; unserializing (or serializing) the object
+requires whatever capability token has been set for the relevant
+operation. I don't think it would be worthwhile to implement a third
+type of reference that is unserialized but without privilege grant;
+if one wants that, one could make a custom storage object that
+doesn't actually serialize anything, but just hands out the
+real reference upon presentation of the right capability token.
+
+Access revocation is important for making sure the callee doesn't
+hold onto the reference longer than it is supposed to (especially if
+the access rights change after the call). However, it cannot be
+determined automatically how long to allow a call-granted reference.
+Many calls may only need it for the duration of the call, but some
+will need to hold the reference longer. The reference also must be
+revoked if the caller's access to that object is revoked
+(technically, it could remain if the callee has another
+path-to-privilege, but it may not want to, if the action it takes
+assumes that the caller had privilege to carry out the action).
+
+Implementing access revocation requires that we either say fuck-you
+to the app and make it unserialize again if it does happen to have an
+alternate path-to-privilege (I believe this is what Unix does), or
+somehow link the unserialized entity to the persistent reference, and
+give it a chance to prove that it's allowed to retain the reference.
+I greatly favor the latter approach; though it's more complicated to
+implement, going the other way will make lots of apps either buggy or
+hideously complicated.
+
+Alternatively, a reference could be more tightly bound to the exact
+path-to-privilege, requiring the app to explicitly specify which
+source(s) of privilege to consider. This has benefits in avoiding
+odd races where an app would have asked the user for a password to
+elevate privilege, but didn't because it happened to have a given
+authority already for some other reason, but which got revoked before
+the operation completed. It'd also be nice in general in helping
+server processes manage inherited permissions sanely. It'd open the
+multiple-references-per-object can of worms, in that a single address
+space could have references to the same object compare unequal (or
+else have a more complicated comparison operation than simply
+checking the reference pointer).
+
+Aah, fuck it. If you pass a reference to a task, you're trusting it
+not to do bad stuff with it. If you can't give it that trust, send
+it a more limited reference. The major time you'd really want to do
+a revocation is when the access rights to an object change, and the
+fuck-you-legitimate-reference-holder approach could be sufficient for
+the case where the owner of the object is pretty sure there are no
+legitimate references remaining. Existing OSes don't handle anything
+beyond that very well AFAIK, so if I come up with anything better
+it'll be a bonus, but I'm not too worried.
+
+The problem with the trust-it approach is that it's harder to know
+who you're talking to in a polymorphic OS; all you really know
+(without excessive ORB queries) is the interface type. The trust
+level for the implementation will often be zero, and (just about)
+anything that can be done to limit leakage of privilege is a good
+thing. Oh well, we'll see how it turns out after further API design.
+It might turn out to not be such a big deal, and I need to get on
+with making stuff work.
+
+2/1/05: GCC on PPC violates the SYSV ABI by not returning small
+structs in registers. This could have a noticeable performance
+impact given that object references are really small structs.
+While fixing it for existing OSes is unlikely due to existing
+binaries, perhaps it should be fixed for this OS while it still
+can be...
+
+3/13/05: Typedefs are not going to be allowed for interfaces. The
+immediate, selfish reason for this is that allowing them would cause
+some minor ugliness in the idlc code that I'd rather avoid (it's ugly
+enough already). However, I'm having a hard time thinking of
+legitimate uses for them compared to inheritance. If such a use
+comes up, it can be re-allowed later. Or maybe I'll implement it
+soon, and consider it a FIXME until then.
+
+3/19/05: Oops. There was an ambiguity in the IDL, in that the
+double-dot was used both as a global namespace prefix and as a range
+specifier. This wasn't caught before, because idlc wasn't allowing
+namespace-qualified constants. The range specifier is now a
+triple-dot.
+
+3/20/05: The memory management scheme is *still* really screwed up;
+an interface declared in the namespace of a superinterface (and
+similar constructs) will cause reference loops. I think when it
+finally gets to the point that I try to make memory management
+actually work right (which probably won't be until this code is made
+library-able) I'll just declare the entire tree to be a monolithic
+entity, freed in one go when it is no longer needed. Reference
+counting could still be used for things that aren't part of the tree,
+like strings and lists.
+
+5/18/05: I'm thinking of allowing actual return values instead of
+using only out parameters (even with overloaded return-the-last-
+out-parameter features of language bindings). It would more clearly
+express the intent of the programmer to designate one of the out
+parameters as a return value, and it would make it easier to take
+advantage of an ABI's return value registers (instead of always using
+pointers, or continuing the last-out-param hack at the function
+pointer level).
+
+Enums in C++ will be typesafe against assigning one initialized enum
+to an enum of a different type; however, it doesn't look like it will
+be able to be made safe against initializing an enum with a const
+initializer from a different enum type, at least not without breaking
+things like switch. Languages such as D should be able to do it
+properly with strong typedefs.
+
+GCC is refusing to do CSE on upcasts, even with const all over the
+place; this means that repeatedly calling methods from a derived
+interface will be less efficient than casting once to the parent
+interface and using that. At some point, this should be resolved,
+but that's an optimization issue which can wait (and it may require
+compiler changes to let the compiler know that the data *really* is
+not going to change, ever, by anyone (apart from initialization which
+happens before the code in question runs)). Maybe LLVM will do
+better. CSE on downcasts would be nice too.
+
+5/20/05: I've decided against method return values. The efficiency
+part is a minor optimization, and would be better handled with an ABI
+that can handle out parameters directly (thus giving you many return
+registers). Of course, switching ABIs will be painful, but it's
+probably going to happen a few times anyway during the early phases
+of development. As for "express[ing] the intent of the programmer",
+it's really not that big of a deal. Eventually, instead of having
+last-out-param hacks like the C++ binding, a language could allow
+some keyword or symbol to replace one (or more) arguments, causing
+them to be treated as return values.
+
+It has nothing to do with me being lazy. No, not at all. *whistling
+and walking away*
+
+It should be possible to disable async as an implementation
+attribute, so that in-process wrappers can execute directly (e.g.
+FileStream's async methods directly sending out an async method to
+the file, rather than requiring both steps to be async).
+
+5/23/05: FileStream's methods probably should be async anyway,
+and then either call a sync method, or provide its own notifier.
+That way, it can keep the position correct if the read or write did
+not fully succeed. It'll also need to keep all operations strictly
+ordered, so if async calls are used, it needs a message serializer
+object.
+
+See update 7/02/06.
+
+10/04/05: There should be a way to check whether a pointer to a
+virtual struct is of the most derived type.
+
+7/02/06: FileStream no longer exists as an interface; instead, an
+object combining Seekable, HasFile, and the appropriate stream
+interface(s) (which have both sync and async methods) should be used.
+This object will usually be local, so async isn't an issue, but it
+can be used remotely if it's really needed to synchronize the file
+offset pointer across multiple address spaces.
--- /dev/null
+Improve error recovery, so as to show as many errors as possible
+before aborting.
+
+Support UTF-8 identifiers. This includes deciding which subset of
+UTF-8 characters are valid for such use, and how to map them to
+languages that only support ASCII.
+
+Implement multi-dimensional arrays.
+
+Delegates.
+
+Typedefs and aliases need to support complex types (e.g. typedef
+int[] intarray;). This ties into multi-dimensional arrays above;
+at the very least, it needs to be possible to declare an array
+of strings.
+
+Be consistent in whether the specified name or the fully qualified
+symbol name is reported in error messages. The former makes it more
+obvious which usage is referred to in certain cases; the latter makes
+it obvious which symbol was found in lookup. Adding column
+information as well as line number would address the former concern.
+
+Check for duplicate GUIDs.
+
+Properties with automatic getters and setters (or just getters if
+read-only), which can be overridden with custom getters and setters
+in the class.
+
+Methods can be specified as read-only; a read-only version of the
+interface is then generated, and a secure read-only reference to an
+object can be generated that can only access read-only methods. This
+would save effort declaring a separate interface and a separate
+class, and would allow the read-write interface to use the read-only
+methods without retrieving the read-only object.
+
+Implement marshalling/unmarshalling stubs.
+
+Implement async methods.
+
+Implement remote-only async methods; these are async only when
+invoked out-of-process, and sync (and thus faster) when invoked
+in-process. Full async methods would be used when the async
+semantics are truly required; remote-only async methods would be used
+where async isn't required, but the method is async-safe and would
+benefit from the buffering of async out-of-process methods. This
+would reduce the number of methods that are declared in both sync and
+async versions (and allow the choice between them to be made
+dynamically).
+
+Possibly implement out parameters on remote-only async methods that
+can only be used when invoked as a sync in-process method; the method
+implementation can check to see how it was invoked, and use the out
+parameters to avoid having to make a completion callback.
--- /dev/null
+#ifndef CDL_H
+#define CDL_H
+
+#include <idlc.h>
+
+class Class;
+typedef Ref<Class> ClassRef;
+
+// All classes declared in the CDL input, in declaration order.
+extern list<ClassRef> classes;
+
+// A CDL class: a namespace that implements one or more IDL interfaces
+// and carries per-method/per-parameter implementation attributes.
+class Class : public NameSpace {
+ list<InterfaceRef> ifaces;
+
+public:
+ // Per-parameter CDL attributes (currently just "copy").
+ struct ParamInfo : public RefCountable<ParamInfo> {
+ bool copy;
+ };
+
+ typedef Ref<ParamInfo> ParamInfoRef;
+
+private:
+ // C++ associative arrays are much more of a
+ // pain to use than they should be.
+
+ typedef map<ParamRef, ParamInfoRef> params_map_type;
+ typedef params_map_type::value_type params_valtype;
+ typedef params_map_type::const_iterator params_iter;
+
+public:
+ // Per-method CDL attributes: implementation name and which
+ // parameters have the "copy" attribute.
+ struct MethodInfo : public RefCountable<MethodInfo> {
+ StrListRef implname;
+ bool copy_params; // True if at least one parameter
+ // has the copy attribute.
+
+ private:
+ params_map_type params;
+
+ public:
+ // Return the ParamInfo for p, or NULL if none has been added.
+ ParamInfo *get_param(Param *p)
+ {
+ params_iter ret = params.find(p);
+
+ if (ret != params.end())
+ return (*ret).second;
+
+ return NULL;
+ }
+
+ // Return the ParamInfo for p, creating a default-initialized
+ // entry (copy == false) if it does not yet exist.
+ ParamInfo *add_param(Param *p)
+ {
+ ParamInfo *pi = get_param(p);
+
+ if (!pi) {
+ pi = new ParamInfo;
+ pi->copy = false;
+
+ pair<params_iter, bool> ret =
+ params.insert(params_valtype(p, pi));
+
+ assert(ret.second);
+ }
+
+ return pi;
+ }
+ };
+
+ typedef Ref<MethodInfo> MethodInfoRef;
+
+private:
+ typedef map<MethodRef, MethodInfoRef> method_map_type;
+ typedef method_map_type::value_type methods_valtype;
+ typedef method_map_type::const_iterator methods_iter;
+
+ method_map_type methods;
+
+public:
+ // Anonymous interface aggregating all interfaces this class
+ // implements (each one is added as a superinterface).
+ InterfaceRef concrete_iface;
+
+ Class(const String *name) :
+ Symbol(name), concrete_iface(new Interface(new String("<anon>")))
+ {
+ // Register globally; see "classes" above.
+ classes.push_back(this);
+ }
+
+ // Record that this class implements iface.
+ void add_iface(Interface *iface)
+ {
+ ifaces.push_back(iface);
+ concrete_iface->add_super(iface);
+ }
+
+ // Return the MethodInfo for m, or NULL if none has been added.
+ MethodInfo *get_method(Method *m)
+ {
+ methods_iter ret = methods.find(m);
+
+ if (ret != methods.end())
+ return (*ret).second;
+
+ return NULL;
+ }
+
+ // Return the MethodInfo for m, creating a default-initialized
+ // entry (copy_params == false) if it does not yet exist.
+ MethodInfo *add_method(Method *m)
+ {
+ MethodInfo *mi = get_method(m);
+
+ if (!mi) {
+ mi = new MethodInfo;
+ mi->copy_params = false;
+
+ pair<methods_iter, bool> ret =
+ methods.insert(methods_valtype(m, mi));
+
+ assert(ret.second);
+ }
+
+ return mi;
+ }
+
+ typedef list<InterfaceRef>::const_iterator ifaces_iterator;
+
+ ifaces_iterator ifaces_begin()
+ {
+ return ifaces.begin();
+ }
+
+ ifaces_iterator ifaces_end()
+ {
+ return ifaces.end();
+ }
+
+ // FIXME: check for duplicate method implementation names.
+ void finalize()
+ {
+ concrete_iface->finalize_class_iface();
+ }
+};
+
+#endif
--- /dev/null
+%{
+/* cdlparse.y -- parser for the CDL compiler
+ *
+ * Written by Scott Wood <scott@buserror.net>
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <string>
+
+#include <idlc.h>
+#include <cdl.h>
+
+#define YYDEBUG 1
+
+/* Report the parser source line of the failure, then abort this
+ input by throwing UserError up through the parser. */
+#define do_yyerror() do { \
+ fprintf(stderr, "YYERROR at %d\n", __LINE__); \
+ throw UserError(); \
+} while (0)
+
+/* Parser state shared between rules and mid-rule actions. */
+static StrListRef nspace_name;
+static StrListRef cur_strlist;
+static IDListRef cur_idlist;
+static ClassRef cur_class;
+static MethodRef cur_method;
+
+%}
+%union {
+ // The lifetime of any of these pointers is one instance
+ // of the one_input rule.
+
+ String *string;
+ StrList *strl;
+ IDList *idl;
+ SymList *syml;
+ Symbol *sym;
+ Method *meth;
+ Class *cla;
+ bool boolean;
+
+ struct { // Used for namespace-qualified type declarations
+ NameSpace *ns; // Namespace portion -- all but last field
+ const String *ident; // New identifier portion -- last field
+ } decl;
+}
+
+// The token list must be exactly the same as in idlparse.y, so that
+// the same lexer can be used.
+
+%token <string> TOK_IDENT
+%token TOK_IFACE
+%token TOK_STRUCT
+%token TOK_CHAR
+%token TOK_OCTET
+%token <con> TOK_ICON
+%token <con> TOK_FCON
+%token <con> TOK_UCON
+%token <con> TOK_INVALID
+%token TOK_BOOL
+%token TOK_SHORT
+%token TOK_INT
+%token TOK_LONG
+%token TOK_USHORT
+%token TOK_UINT
+%token TOK_ULONG
+%token TOK_FSHORT
+%token TOK_FLONG
+%token TOK_CONST
+%token TOK_BITFIELD
+%token TOK_ENUM
+%token TOK_NAMESPACE
+%token TOK_USING
+%token TOK_ASYNC
+%token TOK_INOUT
+%token TOK_OUT
+%token TOK_3DOT
+%token TOK_2DOT
+%token <string> TOK_STR
+%token TOK_SHARED
+%token TOK_PUSH
+%token TOK_TYPEDEF
+%token TOK_ALIAS
+%token TOK_VIRTUAL
+%token TOK_GUID
+%token TOK_INLINE
+%token TOK_STATIC
+%token TOK_IMMUTABLE
+%token TOK_TRUE
+%token TOK_FALSE
+
+// CDL tokens
+%token TOK_COPY
+%token TOK_METHOD
+%token TOK_CLASS
+%token TOK_NAME
+
+// These are not real tokens, but are used as special values in places that
+// normally accept tokens.
+%token TOK_NONE
+%token TOK_ANON
+%token TOK_DCON
+
+// Semantic value types of the nonterminals below.
+%type <strl> ids
+%type <string> ident
+%type <strl> qualified_ident
+%type <idl> qualified_idlist
+%type <boolean> maybe_dbldot
+%type <decl> qualified_decl
+
+%%
+
+// A CDL input is a (possibly empty) sequence of class and namespace
+// definitions.
+input:
+ /* empty */
+| input one_input
+;
+
+one_input:
+ one_input_real
+;
+
+one_input_real:
+ class
+| namespace
+;
+
+// Body of a "namespace" definition; nspace_name was set by the
+// enclosing "namespace" rule. The braced and brace-less forms open
+// the namespace (and pop it when done); the ";" form only declares it.
+namespace_body:
+ ';' {
+ NameSpace *ret = add_nspace(nspace_name, false);
+ nspace_name = NULL;
+
+ if (!ret)
+ do_yyerror();
+ }
+| '{' {
+ NameSpace *ret = add_nspace(nspace_name, true);
+ nspace_name = NULL;
+
+ if (!ret)
+ do_yyerror();
+ } input '}' {
+ pop_nspace();
+ }
+| {
+ NameSpace *ret = add_nspace(nspace_name, true);
+ nspace_name = NULL;
+
+ if (!ret)
+ do_yyerror();
+ } one_input {
+ pop_nspace();
+ }
+;
+
+namespace:
+ TOK_NAMESPACE qualified_ident {
+ nspace_name = $2;
+ } namespace_body
+;
+
+// Comma-separated identifier list, accumulated in cur_strlist.
+ids_body:
+ ident {
+ cur_strlist->push_back($1);
+ }
+| ids_body ',' ident {
+ cur_strlist->push_back($3);
+ }
+;
+
+ids:
+ {
+ cur_strlist = new StrList;
+ } ids_body {
+ $$ = cur_strlist;
+ cur_strlist = NULL;
+ }
+;
+
+// An identifier. Most keywords are also usable as identifiers, so
+// each keyword token is converted back into its String spelling here.
+ident:
+ TOK_IDENT
+| TOK_ASYNC {
+ $$ = new String("async", cur_input_file, curline, TOK_ASYNC);
+ }
+| TOK_INOUT {
+ $$ = new String("inout", cur_input_file, curline, TOK_INOUT);
+ }
+| TOK_OUT {
+ $$ = new String("out", cur_input_file, curline, TOK_OUT);
+ }
+| TOK_SHARED {
+ $$ = new String("shared", cur_input_file, curline, TOK_SHARED);
+ }
+| TOK_PUSH {
+ $$ = new String("push", cur_input_file, curline, TOK_PUSH);
+ }
+| TOK_SHORT {
+ $$ = new String("short", cur_input_file, curline, TOK_SHORT);
+ }
+| TOK_INT {
+ $$ = new String("int", cur_input_file, curline, TOK_INT);
+ }
+| TOK_LONG {
+ $$ = new String("long", cur_input_file, curline, TOK_LONG);
+ }
+| TOK_USHORT {
+ $$ = new String("ushort", cur_input_file, curline, TOK_USHORT);
+ }
+| TOK_UINT {
+ $$ = new String("uint", cur_input_file, curline, TOK_UINT);
+ }
+| TOK_ULONG {
+ $$ = new String("ulong", cur_input_file, curline, TOK_ULONG);
+ }
+| TOK_CHAR {
+ $$ = new String("char", cur_input_file, curline, TOK_CHAR);
+ }
+| TOK_OCTET {
+ $$ = new String("octet", cur_input_file, curline, TOK_OCTET);
+ }
+| TOK_FSHORT {
+ $$ = new String("fshort", cur_input_file, curline, TOK_FSHORT);
+ }
+| TOK_FLONG {
+ $$ = new String("flong", cur_input_file, curline, TOK_FLONG);
+ }
+| TOK_BOOL {
+ $$ = new String("bool", cur_input_file, curline, TOK_BOOL);
+ }
+| TOK_METHOD {
+ $$ = new String("method", cur_input_file, curline, TOK_METHOD);
+ }
+| TOK_NAME {
+ $$ = new String("name", cur_input_file, curline, TOK_NAME);
+ }
+| TOK_COPY {
+ $$ = new String("copy", cur_input_file, curline, TOK_COPY);
+ }
+| TOK_CLASS {
+ $$ = new String("class", cur_input_file, curline, TOK_CLASS);
+ }
+| TOK_GUID {
+ $$ = new String("guid", cur_input_file, curline, TOK_GUID);
+ }
+| TOK_STATIC {
+ $$ = new String("static", cur_input_file, curline, TOK_STATIC);
+ }
+| TOK_IFACE {
+ $$ = new String("interface", cur_input_file, curline, TOK_IFACE);
+ }
+| TOK_STRUCT {
+ $$ = new String("struct", cur_input_file, curline, TOK_STRUCT);
+ }
+| TOK_CONST {
+ $$ = new String("const", cur_input_file, curline, TOK_CONST);
+ }
+| TOK_BITFIELD {
+ $$ = new String("bitfield", cur_input_file, curline, TOK_BITFIELD);
+ }
+| TOK_ENUM {
+ $$ = new String("enum", cur_input_file, curline, TOK_ENUM);
+ }
+| TOK_USING {
+ $$ = new String("using", cur_input_file, curline, TOK_USING);
+ }
+| TOK_TYPEDEF {
+ $$ = new String("typedef", cur_input_file, curline, TOK_TYPEDEF);
+ }
+| TOK_ALIAS {
+ $$ = new String("alias", cur_input_file, curline, TOK_ALIAS);
+ }
+| TOK_VIRTUAL {
+ $$ = new String("virtual", cur_input_file, curline, TOK_VIRTUAL);
+ }
+| TOK_INLINE {
+ $$ = new String("inline", cur_input_file, curline, TOK_INLINE);
+ }
+;
+
+// Dot-separated identifier path, accumulated in cur_strlist.
+qualified_ident_raw:
+ ident {
+ cur_strlist->push_back($1);
+ }
+| qualified_ident_raw '.' ident {
+ cur_strlist->push_back($3);
+ }
+;
+
+// Optional leading ".." anchoring the path at the root namespace.
+maybe_dbldot:
+ /* empty */ {
+ $$ = false;
+ }
+| TOK_2DOT {
+ $$ = true;
+ }
+;
+
+/* The mid-rule action is to keep curline correct, as well
+ as creating cur_strlist. */
+qualified_ident:
+ maybe_dbldot {
+ cur_strlist = new StrList;
+
+ if ($1)
+ cur_strlist->push_front(new String("", cur_input_file,
+ curline, TOK_IDENT));
+ } qualified_ident_raw {
+ $$ = cur_strlist;
+ cur_strlist = NULL;
+ }
+;
+
+qualified_ids:
+ qualified_ident {
+ cur_idlist->push_back($1);
+ }
+| qualified_ids ',' qualified_ident {
+ cur_idlist->push_back($3);
+ }
+;
+
+qualified_idlist:
+ {
+ cur_idlist = new IDList;
+ } qualified_ids {
+ $$ = cur_idlist;
+ cur_idlist = NULL;
+ }
+;
+
+// A possibly-qualified declaration name. Yields the enclosing
+// namespace (created and entered if needed) and the new identifier.
+// Note: leaves the namespace pushed; the consuming rule is expected
+// to call pop_nspace() when the declaration's scope closes.
+qualified_decl:
+ maybe_dbldot {
+ if ($1)
+ yyerrorf("Namespaces cannot be declared "
+ "with an absolute path.");
+
+ cur_strlist = new StrList;
+ } qualified_ident_raw {
+ $$.ident = cur_strlist->back();
+ $$.ident->retain();
+
+ cur_strlist->pop_back();
+
+ if (!cur_strlist->empty())
+ $$.ns = add_nspace(cur_strlist, true);
+ else {
+ $$.ns = cur_nspace;
+ nspace_stack.push_front(cur_nspace);
+ }
+
+ cur_strlist = NULL;
+
+ if (!$$.ns)
+ do_yyerror();
+ }
+;
+
+// class <name> : <interface list> <class_body>
+// Each listed name must resolve to an Interface.
+class:
+ TOK_CLASS qualified_decl {
+ cur_class = new Class($2.ident);
+ $2.ns->add_user(cur_class);
+ } ':' qualified_idlist {
+ if ($5->empty()) {
+ yyerrorf("A class must implement at least one interface.");
+ throw UserError();
+ }
+
+ for (IDList::const_iterator i = $5->begin(); i != $5->end(); ++i) {
+ StrList *strl = *i;
+ Symbol *sym = lookup_sym(toplevel, strl, toplevel);
+ Interface *iface = dynamic_cast<Interface *>(sym);
+
+ if (!iface) {
+ yyerrorfl(cur_input_file, strl->back()->line,
+ "\"%s\" is not an interface.",
+ strl->flatten()->c_str());
+
+ throw UserError();
+ }
+
+ cur_class->add_iface(iface);
+ }
+ } class_body {
+ // qualified_decl left the namespace pushed; close it out.
+ pop_nspace();
+ cur_class->finalize();
+ cur_class = NULL;
+ }
+;
+
+class_body:
+ ';'
+| '{' multi_class_body '}'
+;
+
+multi_class_body:
+ one_class_body
+| multi_class_body one_class_body
+;
+
+// method <name> <method_body> -- per-method implementation attributes.
+one_class_body:
+ TOK_METHOD qualified_ident {
+ // FIXME: use the set of supported interfaces as a search path
+ Symbol *sym = lookup_sym(toplevel, $2, toplevel);
+ cur_method = dynamic_cast<Method *>(sym);
+
+ if (!cur_method) {
+ yyerrorfl(cur_input_file, $2->back()->line,
+ "\"%s\" is not a method.",
+ $2->flatten()->c_str());
+ throw UserError();
+ }
+ } method_body {
+ cur_method = NULL;
+ }
+;
+
+method_body:
+ one_method_body
+| '{' multi_method_body '}'
+;
+
+multi_method_body:
+ one_method_body
+| multi_method_body one_method_body
+;
+
+// "name <ident>;" sets the implementation name for cur_method;
+// "copy <ids>;" marks the listed parameters with the copy attribute.
+one_method_body:
+ TOK_NAME qualified_ident ';' {
+ Class::MethodInfo *mi = cur_class->add_method(cur_method);
+ mi->implname = $2;
+ }
+| TOK_COPY ids ';' {
+ Class::MethodInfo *mi = cur_class->add_method(cur_method);
+
+ for (StrList::const_iterator i = $2->begin(); i != $2->end(); ++i) {
+ const String *str = *i;
+ Symbol *sym;
+
+ try {
+ sym = cur_method->lookup(str);
+ }
+ catch (SymbolNotFound) {
+ yyerrorfl(cur_input_file, str->line,
+ "\"%s\" is not a parameter of \"%s\".",
+ str->c_str(),
+ cur_method->get_fq_name()->flatten()->c_str());
+
+ throw UserError();
+ }
+
+ Param *p = dynamic_cast<Param *>(sym);
+ assert(p);
+
+ Class::ParamInfo *pi = mi->add_param(p);
+ pi->copy = true;
+ mi->copy_params = true;
+ }
+ }
+;
+
+%%
+
+// The CDL grammar has no rules that use constant tokens, so the
+// shared lexer's constant output is pointed at a dummy.
+static Con dummy_con;
+
+void setup_cdlparse()
+{
+ yylval_con = &dummy_con;
+ yylval_string = &cdl_lval.string;
+}
+
+list<ClassRef> classes;
+
--- /dev/null
+#ifndef IDLC_COMPILEDDEFS_H
+#define IDLC_COMPILEDDEFS_H
+
+#include <stdint.h>
+
+// The structs below contain C bitfields, so the build must declare
+// which bitfield layout convention applies (see comments below).
+#if !defined(BITFIELD_LE) && !defined(BITFIELD_BE)
+#error Please define your bitfield endianness (BITFIELD_LE or BITFIELD_BE).
+#endif
+
+// This is the compiled representation of definitions when stored on
+// a legacy filesystem (as opposed to native object storage).
+// Normally, a standard encoding would be used to map objects to
+// directories and files; however, such encodings cannot be used
+// before they exist. This special mapping allows the system to be
+// bootstrapped from a legacy system.
+//
+// As this is a transitional representation, and IDL management is
+// not performance critical, emphasis has been placed on simplicity
+// rather than efficiency. In particular, I don't want to write
+// throw-away code to compensate for most filesystems' inability to
+// deal with small files efficiently, nor do I want to muck around
+// with storing the compiled definitions in textual form (thus
+// needing to parse them *again*; I want to keep that out of the
+// ORB). Dot files and directories are used rather than the more
+// straightforward way of keeping the namespace under its own member
+// in order to make the compiled definitions easier to browse with
+// normal utilities (in the final system, you'll be able to use a
+// custom view that arranges it however you want; however, Unix
+// doesn't have custom filesystem views).
+//
+// I have tried to make the definitions close to what could be
+// emitted by an IDL compiler, minus the hierarchical relationship
+// (which won't exist on a legacy filesystem). These definitions
+// require C++ (plus C99 stdint.h); C will choke on the assumed
+// struct namespace behavior (among other things).
+//
+// Each definition is represented by a directory. If the definition is
+// for a non-generic namespace (i.e. it is for a struct, interface,
+// bitfield, etc.), the definition itself is placed in a file called
+// ".self" inside the directory. The file contains a CompiledDefHeader
+// struct followed by the appropriate type-specific struct.
+//
+// The non-dotted contents of the directory are the contents of the
+// definition's namespace, if any. Anonymous types are given the
+// name "_anon_<unique>", where <unique> is an arbitrary non-conflicting
+// name.
+//
+// A compiled definition file may be either big or little endian; if
+// magic_reversed is found, then all integers in the file must be
+// reversed prior to usage.
+//
+// Bitfields, on the other hand, must be arranged in a little endian
+// format (i.e. the first field gets the least significant bits; the byte
+// order of the containing integer is the same as for non-bitfield
+// integers). Bitfields generated by idlc will contain padding and
+// reversed members if the target has big-endian bitfields.
+
+// Present at the start of every compiled-definition (".self") file,
+// followed by the type-specific struct selected by "type".
+struct CompiledDefHeader {
+ static const uint32_t magic_normal = 0x2d29c8a9;
+ static const uint32_t magic_reversed = 0xa9c8292d;
+
+ uint32_t magic; // magic_normal or magic_reversed
+
+ enum Type {
+ NameSpace,
+ BasicType,
+ Datum,
+ Interface,
+ Method,
+ Param,
+ Struct,
+ Enum,
+ BitField,
+ Alias,
+ TypeDef
+ };
+
+ // One of the above types; the enum isn't used directly, as the
+ // size is not guaranteed.
+ int32_t type;
+};
+
+// Definition body for a plain (generic) namespace.
+struct CompiledNameSpace {
+ // The length of the string in bytes (not characters), excluding
+ // the null terminator.
+
+ int32_t length;
+
+ // The fully qualified name of the namespace follows, as a
+ // null-terminated UTF-8 string. This allows the namespace
+ // to be mounted by merely supplying its filesystem path,
+ // without having to specify the mount point.
+};
+
+// This struct does not appear standalone, but is included in various
+// structs that can refer to arrays.
+struct CompiledArray {
+ // Inclusive lower and upper bounds of the array. If the
+ // array has no upper bound, [1] should be -1. If the array
+ // has no lower bound, [0] should be 0. If this is not an
+ // array, both should be 0.
+ // (Thus {0, -1} denotes an unbounded array.)
+
+ int64_t bounds[2];
+};
+
+struct CompiledBasicType {
+ // Size of the type, as follows:
+ // Byte: 8
+ // Short: 16
+ // Int: 32
+ // Long: 64
+ // FShort: 32
+ // FLong: 64
+ // Bool: 0 (actual size is implementation defined)
+ //
+ // Other sizes may be used within bitfields (up to a max of 64 bits).
+
+ int32_t bits;
+
+ // Unsigned, Float, and Bool are mutually exclusive.
+
+ union Flags {
+ struct Field {
+#ifdef BITFIELD_LE
+ unsigned int Unsigned:1;
+ unsigned int Float:1;
+ unsigned int Bool:1;
+ unsigned int TypeDef:1;
+#else
+ unsigned int _pad:28;
+
+ unsigned int TypeDef:1;
+ unsigned int Bool:1;
+ unsigned int Float:1;
+ unsigned int Unsigned:1;
+#endif
+ } field;
+
+ uint32_t raw;
+
+ // Bit positions of the fields above within "raw".
+ struct init {
+ static const uint32_t Unsigned = 1 << 0;
+ static const uint32_t Float = 1 << 1;
+ static const uint32_t Bool = 1 << 2;
+ static const uint32_t TypeDef = 1 << 3;
+ };
+ } flags;
+
+ uint64_t guid[2];
+
+ // Array bounds; all-zero if this is not an array.
+ CompiledArray array;
+};
+
+
+// A by-name reference to another compiled symbol.
+struct CompiledAlias {
+ // The length of the string in bytes (not characters), excluding
+ // the null terminator.
+
+ int32_t length;
+
+ // The name of the aliased symbol follows, as a null-terminated
+ // UTF-8 string. This will be the final symbol, and not an alias.
+};
+
+// A CompiledDatum is used for data fields in a structure, bitfield, or
+// enumeration.
+
+struct CompiledDatum {
+ // This is a description of the datum's type, if it is not a named type.
+ // This is ignored (except the array part) if "type" is non-empty.
+
+ CompiledBasicType basictype;
+
+ // Const value(s); cast to the appropriate type. For Bool, cast to
+ // Byte; 0 is false, 1 is true. Extend all types to 64-bits.
+ // ucon is also used for bitfield element sizes.
+
+ union {
+ int64_t icon;
+ uint64_t ucon;
+ double fcon;
+ char data[8];
+ };
+
+ // Const can only be used for basic types (both named and unnamed);
+ // Immutable is used for non-initialized const fields. Invalid is
+ // only used internally by idlc; it is reserved in the file format.
+
+ union Flags {
+ struct Field {
+#ifdef BITFIELD_LE
+ unsigned int Const:1;
+ unsigned int Invalid:1;
+ unsigned int Inline:1;
+ unsigned int Immutable:1;
+#else
+ unsigned int _pad:28;
+
+ unsigned int Immutable:1;
+ unsigned int Inline:1;
+ unsigned int Invalid:1;
+ unsigned int Const:1;
+#endif
+ } field;
+
+ uint32_t raw;
+
+ // Bit positions of the fields above within "raw".
+ struct init {
+ static const uint32_t Const = 1 << 0;
+ static const uint32_t Invalid = 1 << 1;
+ static const uint32_t Inline = 1 << 2;
+ static const uint32_t Immutable = 1 << 3;
+ };
+ } flags;
+
+ // If it's a named type, this points to the type.
+ CompiledAlias type;
+};
+
+// Methods, member types, member constants, etc. go in the namespace.
+// Methods are ordered; all others are unordered.
+
+struct CompiledInterface {
+ int32_t num_methods;
+ int32_t num_supers;
+
+ uint64_t guid[2];
+
+ // An array of num_methods + num_supers length of CompiledAliases
+ // follows, representing methods in declared order, followed by
+ // superclasses in declared order. The names of methods shall
+ // consist only of the final component, with no namespaces prepended.
+ // The names of superclasses are fully namespace-qualified. Each
+ // CompiledAlias shall begin on a 4-byte boundary.
+};
+
+// Parameters go in the namespace, and are ordered according to the
+// list contained in this struct. Exceptions thrown are unordered
+// Aliases in a subdirectory called ".exceptions", with arbitrary
+// names.
+
+struct CompiledMethod {
+ union Flags {
+ struct Field {
+#ifdef BITFIELD_LE
+ unsigned int Async:1;
+#else
+ unsigned int _pad:31;
+
+ unsigned int Async:1;
+#endif
+ } field;
+
+ uint32_t raw;
+
+ // Bit position of the field above within "raw".
+ struct init {
+ static const uint32_t Async = 1 << 0;
+ };
+ } flags;
+
+ // Number of parameters.
+ int32_t num_entries;
+
+ // An array of num_entries length of CompiledAliases representing
+ // parameters in declared order follows. Each name shall be as in
+ // CompiledInterface, without namespace qualification.
+};
+
+// A param isn't a namespace; however, it is a directory. In addition to
+// the ".self" file, the directory contains a "type" Alias file.
+struct CompiledParam {
+ // This is a description of the parameter's type, if it is not a
+ // named type. This is ignored (except the array part) if "type"
+ // is non-empty.
+
+ CompiledBasicType basictype;
+
+ union Flags {
+ struct Field {
+#ifdef BITFIELD_LE
+ unsigned int In:1;
+ unsigned int Out:1;
+
+ unsigned int Shared:1;
+ unsigned int Push:1;
+ unsigned int Inline:1;
+ unsigned int Immutable:1;
+#else
+ unsigned int _pad:26;
+
+ unsigned int Immutable:1;
+ unsigned int Inline:1;
+ unsigned int Push:1;
+ unsigned int Shared:1;
+
+ unsigned int Out:1;
+ unsigned int In:1;
+#endif
+ } field;
+
+ uint32_t raw;
+
+ // Bit positions of the fields above within "raw".
+ struct init {
+ static const uint32_t In = 1 << 0;
+ static const uint32_t Out = 1 << 1;
+ static const uint32_t Shared = 1 << 2;
+ static const uint32_t Push = 1 << 3;
+ static const uint32_t Inline = 1 << 4;
+ static const uint32_t Immutable = 1 << 5;
+ };
+ } flags;
+
+
+ // If it's a named type, this points to the type.
+ CompiledAlias type;
+};
+
+// Fields, member types, member constants, etc. go in the namespace.
+// All but fields are unordered.
+
+struct CompiledStruct {
+ union Flags {
+ struct Field {
+#ifdef BITFIELD_LE
+ // Struct has a superstruct.
+
+ unsigned int Super:1;
+
+ // Struct has run-time type information. This requires that
+ // Super be set.
+
+ unsigned int Virtual:1;
+
+ // Struct defaults to "inline" when declared in a struct.
+ // This is mandatory for anonymous structs.
+
+ unsigned int Inline:1;
+#else
+ unsigned int _pad:29;
+
+ unsigned int Inline:1;
+ unsigned int Virtual:1;
+ unsigned int Super:1;
+#endif
+ } field;
+
+ uint32_t raw;
+
+ // Bit positions of the fields above within "raw".
+ struct init {
+ static const uint32_t Super = 1 << 0;
+ static const uint32_t Virtual = 1 << 1;
+ static const uint32_t Inline = 1 << 2;
+ };
+ } flags;
+
+ int32_t num_entries;
+
+ uint64_t guid[2];
+
+ // An array of num_entries length of CompiledAliases representing
+ // fields in declared order follows. Each name shall be as in
+ // CompiledMethod. If the Super flag is set, then another
+ // fully-qualified CompiledAlias is placed at the end of the array.
+};
+
+// Enum entries are unsigned BasicDatums of the specified size.
+
+struct CompiledEnum {
+ // Size of enumeration type
+ int32_t bits;
+
+ // Number of enumeration values.
+ int32_t num_entries;
+
+ uint64_t guid[2];
+
+ // An array of num_entries length of CompiledAliases representing
+ // values in declared order follows. Each name shall be as in
+ // CompiledMethod.
+};
+
+// BitField fields are unsigned BasicDatums and Enum Datums of
+// arbitrary size, which must add up to at most "bits".
+
+struct CompiledBitField {
+ // Width of bitfield
+ int32_t bits;
+
+ // Number of fields.
+ int32_t num_entries;
+
+ uint64_t guid[2];
+
+ // An array of num_entries length of CompiledAliases representing
+ // fields in declared order follows. Each name shall be as in
+ // CompiledMethod.
+};
+
+#endif
--- /dev/null
+// idlc.h -- Definitions used throughout idlc.
+// A lot of this should probably be factored out into more specific headers.
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#ifndef IDLC_H
+#define IDLC_H
+
+// inttypes.h on OSX assumes it can use restrict, but C++ doesn't have it.
+// Hopefully C++ will catch up to C soon...
+
+#define restrict
+
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <string>
+#include <stack>
+#include <list>
+#include <map>
+#include <vector>
+
+#include "compileddef.h"
+
+using std::list;
+using std::stack;
+using std::string;
+using std::pair;
+using std::map;
+using std::vector;
+
+// Lexer entry points; the IDL and CDL parsers share a single lexer,
+// hence the cdl_* aliases.
+int yylex();
+int idl_lex();
+#define cdl_lex idl_lex
+void idl_error(char *s);
+
+#define cdl_error idl_error
+
+void setup_idlparse();
+void setup_cdlparse();
+
+// Error reporting; the *fl variant also reports a file/line location.
+#ifdef __GNUC__
+void yyerrorf(const char *s, ...) __attribute__((format(printf, 1, 2)));
+void yyerrorfl(const char *file, int line, const char *s, ...)
+__attribute__((format(printf, 3, 4)));
+#else
+void yyerrorf(const char *s, ...);
+void yyerrorfl(const char *file, int line, const char *s, ...);
+#endif
+
+int idl_parse();
+int cdl_parse();
+int finish_lex();
+
+// Global compiler state shared by the lexer, parsers, and back ends.
+extern int idl_debug, cdl_debug, curline, impl, num_err;
+extern int current_pass;
+extern FILE *yyin;
+extern unsigned int enum_pos;
+extern const char *cur_input_file;
+extern int compiling_idl, compiling_cdl;
+extern bool makedep;
+
+// Exception thrown by BUG() when idlc detects an internal
+// inconsistency; records where in the idlc source it was raised.
+struct InternalError
+{
+ const char *file; // idlc source file containing the BUG()
+ int line; // line within that file
+
+ InternalError(const char *src_file, int src_line)
+ : file(src_file), line(src_line)
+ {
+ }
+};
+
+// Report an internal idlc error by throwing InternalError with the
+// location of the BUG() invocation.
+#define BUG() do { \
+ throw InternalError(__FILE__, __LINE__); \
+} while (0)
+
+// Replace the C library's assert() with one that throws InternalError,
+// so failures unwind through the parser rather than calling abort().
+#undef assert
+
+#define assert(x) do { \
+ if (!(x)) BUG(); \
+} while (0)
+
+// Base class for objects whose lifetime is managed by reference
+// counting; the object deletes itself when the count reaches zero.
+class Releasable {
+public:
+ mutable int count;
+
+ Releasable(int initial_count) : count(initial_count)
+ {
+ }
+
+ virtual ~Releasable()
+ {
+ }
+
+ // Drop one reference; releasing with no references outstanding
+ // is an internal error.
+ void release() const
+ {
+ if (count <= 0) {
+ fprintf(stderr, "Reference count is %d in release\n", count);
+ BUG();
+ }
+
+ count--;
+
+ if (count == 0)
+ delete this;
+ }
+};
+
+// Collects objects whose initial reference should be dropped in bulk
+// at a safe point (see RefCountable, which autoreleases on creation).
+class AutoReleasePool {
+ list<const Releasable *> pool;
+
+public:
+ // Schedule obj to be released on the next clean().
+ void add(const Releasable *obj)
+ {
+ pool.push_back(obj);
+ }
+
+ // Release every pooled object, front to back, emptying the pool.
+ void clean()
+ {
+ while (!pool.empty()) {
+ pool.front()->release();
+ pool.pop_front();
+ }
+ }
+};
+
+extern AutoReleasePool autorelease_pool;
+
+// CRTP mixin adding retain/autorelease on top of Releasable; T is the
+// deriving class, so retain() and autorelease() return correctly-typed
+// pointers. New objects start with one reference, already autoreleased.
+template <typename T>
+class RefCountable : public Releasable {
+private:
+ // RefCountable objects should never be assigned to,
+ // as there could be references to the object remaining.
+ // The private assignment operator prevents this, unless
+ // a subclass defines its own assignment operator (don't
+ // do that).
+ void operator =(const RefCountable &rc)
+ {
+ BUG();
+ }
+
+ RefCountable(const RefCountable &rc) : Releasable(1)
+ {
+ BUG();
+ }
+
+public:
+ RefCountable() : Releasable(1)
+ {
+ // Normally, this wouldn't be automatic, but this is what most
+ // things in IDLC are going to want, and it eliminates problems
+ // with needing to cast the return type of autorelease().
+ //
+ // The automatic autorelease() means that all refcountable objects
+ // must be allocated with "new", not on the stack, as global
+ // data, or as a class member.
+
+ autorelease();
+ }
+
+ virtual ~RefCountable()
+ {
+ }
+
+ const T *retain() const
+ {
+ if (count <= 0) {
+ fprintf(stderr, "Reference count is %d in retain\n", count);
+ BUG();
+ }
+
+ count++;
+ return static_cast<const T *>(this);
+ }
+
+ T *retain()
+ {
+ if (count <= 0) {
+ fprintf(stderr, "Reference count is %d in retain\n", count);
+ BUG();
+ }
+
+ count++;
+ return static_cast<T *>(this);
+ }
+
+ // FIX: this overload previously cast away constness with
+ // static_cast (to Releasable * and to T *), which is ill-formed
+ // and would fail to compile if it were ever instantiated.
+ // AutoReleasePool::add() takes a const pointer, so no const
+ // needs to be cast away at all.
+ const T *autorelease() const
+ {
+ autorelease_pool.add(static_cast<const Releasable *>(this));
+ return static_cast<const T *>(this);
+ }
+
+ T *autorelease()
+ {
+ autorelease_pool.add(static_cast<Releasable *>(this));
+ return static_cast<T *>(this);
+ }
+
+ // This is only here because C++ obnoxiously requires it to
+ // be just because it's "called" from code excluded with an
+ // if (0) in a template. No code is ever generated that calls
+ // it, but it still must exist.
+
+ bool operator < (const RefCountable &rc)
+ {
+ BUG();
+ }
+};
+
+// T must be RefCountable
+// Smart pointer holding a retained reference to T. If compare_ptrs is
+// true, ==/!=/< compare pointer identity; otherwise they compare the
+// pointed-to values.
+template<typename T, bool compare_ptrs = true>
+class Ref {
+ // STL containers like to use const on the items they
+ // contain; the mutable allows such containers to hold
+ // pointers to non-const data. For truly const Refs,
+ // make T const, as in StringRef. Unfortunately,
+ // it cannot be done in a more fine-grained manner,
+ // AFAICT.
+
+public:
+ mutable T *data;
+
+public:
+ Ref()
+ {
+ data = NULL;
+ }
+
+ Ref(T *data) : data(data)
+ {
+ if (data)
+ data->retain();
+ }
+
+ // FIX: the copy constructor now takes a const reference; it
+ // previously took "Ref &", so copying from a const Ref could
+ // only happen via the Ref(T *) converting constructor.
+ Ref(const Ref &le) : data(le.data)
+ {
+ if (data)
+ data->retain();
+ }
+
+ Ref &operator =(const Ref &le)
+ {
+ // The retain must come first, in case both Refs are the same
+ if (le.data)
+ le.data->retain();
+ if (data)
+ data->release();
+
+ data = le.data;
+
+ return *this;
+ }
+
+ Ref &operator =(T *new_data)
+ {
+ // The retain must come first, in case both Refs are the same
+ if (new_data)
+ new_data->retain();
+ if (data)
+ data->release();
+
+ data = new_data;
+
+ return *this;
+ }
+
+ ~Ref()
+ {
+ if (data)
+ data->release();
+ }
+
+ operator T *() const
+ {
+ return data;
+ }
+
+ // NOTE: dereferencing a NULL Ref is not checked here.
+ operator T &() const
+ {
+ return *data;
+ }
+
+ T *operator *() const
+ {
+ return data;
+ }
+
+ T *operator ->() const
+ {
+ return data;
+ }
+
+ bool operator == (const Ref &le) const
+ {
+ if (compare_ptrs)
+ return data == le.data;
+ else
+ return *data == *le.data;
+ }
+
+ bool operator != (const Ref &le) const
+ {
+ if (compare_ptrs)
+ return data != le.data;
+ else
+ return *data != *le.data;
+ }
+
+ bool operator < (const Ref &le) const
+ {
+ if (compare_ptrs)
+ return reinterpret_cast<intptr_t>(data) <
+ reinterpret_cast<intptr_t>(le.data);
+ else
+ return *data < *le.data;
+ }
+};
+
+// A reference-counted std::string that also remembers where the
+// token came from in the IDL source (file, line, and token type),
+// for use in later error reporting.
+class String : public string, public RefCountable<String> {
+public:
+ // Origin of the string, if from the IDL file.
+ // Otherwise, both values are zero (and file is empty).
+
+ const char *file;
+ int line;
+ int token;
+
+ String(const char *s = "") :
+ string(s), file(""), line(0), token(0)
+ {
+ }
+
+ String(const String &s) :
+ string(s), file(s.file), line(s.line), token(s.token)
+ {
+ }
+
+ String(const char *s, const char *file, int line, int token) :
+ string(s), file(file), line(line), token(token)
+ {
+ }
+};
+
+extern String **yylval_string;
+typedef Ref<const String, false> StringRef;
+
+/* If a StrList is used for a namespace-qualified identifier, and
+ said identifier begins with ".." (i.e. starts from the root
+ namespace), the leading ".." is represented by a zero-length
+ String.
+
+ Note that list doesn't have a virtual destructor, so all deletions
+ should happen through either RefCountable or the wrapper List
+ class. */
+
+// A reference-counted list of Strings, typically the components of a
+// (possibly namespace-qualified) identifier.
+class StrList : public list<StringRef>, public RefCountable<StrList> {
+public:
+ StrList()
+ {
+ }
+
+ // Parse a flat String into a StrList, using the specified delimiter.
+ StrList(const String *input, char delimiter = '.');
+
+ // Turn a StrList into a flat String, using the specified delimiter.
+ String *flatten(const char *delimiter = ".");
+};
+
+typedef Ref<StrList> StrListRef;
+
+// ConList is like StrList, but with constant initializers
+
+class Datum;
+
+// A constant value as parsed from the IDL, tagged by token type.
+struct Con {
+ union {
+ int64_t icon;
+ uint64_t ucon;
+ StrList *dcon;
+
+ // FIXME: handle platforms with weird floating point endianness
+ double fcon;
+
+ // Raw bytes of the value, for copying without caring
+ // which union member is active.
+ char data[8];
+ } con;
+
+ // TOK_ICON, TOK_UCON, TOK_FCON, TOK_BOOL, TOK_DCON,
+ // TOK_INVALID, or TOK_NONE
+ // Constants are stored as signed (ICON) unless too
+ // large to fit in a signed 64-bit integer. Additional size and
+ // signedness checks are made when down casting to a smaller size
+ // to fit into a particular datum; such constants will have a
+ // value of zero.
+ //
+ // TOK_NONE is valid for maybeconst and size. TOK_INVALID
+ // indicates a previously detected error; don't emit any further
+ // errors due to this constant.
+ //
+ // TOK_DCON is used for symbolic constants, whose value may
+ // not yet be known.
+
+ int type;
+};
+
+extern Con *yylval_con;
+
+// A named constant initializer: the member name plus its value.
+struct ConInit {
+ StringRef str;
+ Con con;
+
+ ConInit(const String *str, Con &con) : str(str), con(con)
+ {
+ }
+
+ // Copy via the default operator=, which copies str through
+ // StringRef's assignment (taking a new reference) and con
+ // memberwise.
+ ConInit(const ConInit &coninit)
+ {
+ *this = coninit;
+ }
+};
+
+// A reference-counted list of named constant initializers (for enums
+// and bitfields).
+class ConList : public list<ConInit>, public RefCountable<ConList> {};
+typedef Ref<ConList> ConListRef;
+
+// Like StrList, but is a list of possibly namespace-qualified identifiers.
+class IDList : public list<StrListRef>, public RefCountable<IDList> {};
+typedef Ref<IDList> IDListRef;
+
+
+class NameSpace;
+class LangCallback;
+
+// This is incremented when a chain of symbols is traversed to reset
+// detection of already-visited symbols. It is assumed that this
+// will not have to happen billions of times.
+
+extern int traversal;
+
+// Base class for everything that lives in a namespace: types, datums,
+// methods, namespaces themselves, etc. Provides the name, the owning
+// namespace backlink, and the virtual hooks for each compiler pass.
+class Symbol : public RefCountable<Symbol> {
+ NameSpace *ns;
+
+public:
+ StringRef name;
+
+ // Symbol was loaded externally
+ bool external;
+
+ // If set, the symbol is private, and will not be available
+ // for lookups except those made directly in the context of
+ // the containing namespace. Private symbols do not get
+ // outputted. They are used to implement imports of specific
+ // symbols (as aliases), rather than entire namespaces.
+
+ bool priv;
+
+ // This is set to ::traversal when this symbol is visited along a chain.
+ // If a target needs more than 8 simultaneous chains, increase the size
+ // of the array. These traversals are reserved for language binding use.
+
+ int traversed[8];
+
+ Symbol()
+ {
+ ns = NULL;
+ external = false;
+ priv = false;
+
+ memset(traversed, 0, sizeof(traversed));
+ }
+
+ Symbol(const String *_name) : name(_name)
+ {
+ // NOTE(review): the StringRef member already retains _name in
+ // its constructor; this explicit retain() adds a second
+ // reference that the Ref destructor will not release. Looks
+ // like a deliberate "keep names alive forever" choice or a
+ // leak — confirm against the release sites.
+ if (_name)
+ name->retain();
+
+ ns = NULL;
+ external = false;
+ priv = false;
+
+ memset(traversed, 0, sizeof(traversed));
+ }
+
+ virtual ~Symbol();
+
+ // The namespace this symbol has been added to, or NULL.
+ NameSpace *get_ns() const
+ {
+ return ns;
+ }
+
+ const String *get_name() const
+ {
+ return name;
+ }
+
+ // If append is non-NULL, it is appended to non-user namespaces,
+ // to facilitate a language binding that cannot place nested types
+ // in the same language construct as the actual interface. The
+ // recommended suffix is "_ns", which is a reserved ending in the
+ // IDL. No suffix is placed on the final component of the name,
+ // even if it is a non-user namespace. The not_last field is used
+ // to detect whether it is the final component; it is only used
+ // internally, and should always be false when called externally.
+ //
+ // This function does *NOT* add a null first element to indicate
+ // that the name is fully qualified. If you need that, you have
+ // to add it yourself (call 'push_front(new String(""))' on the
+ // result).
+
+ StrList *get_fq_name(const char *append = NULL,
+ bool not_last = false) const;
+
+ // Per-pass hooks, run in this order over the whole tree; the
+ // defaults do nothing so leaf symbol types override only the
+ // passes they need.
+
+ virtual void lookup_imports()
+ {
+ }
+
+ virtual void lookup_chain()
+ {
+ }
+
+ virtual void lookup_misc()
+ {
+ }
+
+ virtual void final_analysis()
+ {
+ }
+
+ // These two methods must be implemented by all IDL symbols, but are
+ // not needed in CDL symbols (and therefore are not pure virtual).
+
+ virtual void output(const char *root)
+ {
+ }
+
+ virtual void output_lang(LangCallback *lcb, int arg = 0, void *arg2 = NULL)
+ {
+ }
+
+ // Find and return the topmost symbol other than a user namespace
+ // containing this symbol. If this symbol's parent is a user
+ // namespace, it returns itself. May not be called on the
+ // toplevel namespace.
+
+ Symbol *find_toplevel_type();
+
+ // Get the true type of the symbol, regardless of whether it is an
+ // alias.
+
+ virtual Symbol *get_concrete_sym(bool follow_typedefs = true)
+ {
+ return this;
+ }
+
+ friend class NameSpace;
+};
+
+typedef Ref<Symbol> SymbolRef;
+
+// A reference-counted list of symbol references.
+class SymList : public list<SymbolRef>, public RefCountable<SymList> {};
+typedef Ref<SymList> SymListRef;
+
+// Empty marker types thrown as exceptions by the lookup and
+// namespace code; they carry no payload, only identity.
+
+struct SymbolNotFound {
+};
+
+struct DuplicateSymbol {
+};
+
+struct InvalidArgument {
+};
+
+// Thrown after an error message has already been printed to the user;
+// catchers should unwind without emitting further diagnostics.
+struct UserError {
+};
+
+typedef Ref<NameSpace> NameSpaceRef;
+
+// A symbol container: maps names to symbols, tracks imported
+// namespaces, and (for import namespaces) lazily loads symbols from
+// external storage. Virtual base so Struct/Interface/etc. can be
+// both a Symbol and a NameSpace.
+class NameSpace : public virtual Symbol {
+protected:
+ // Filesystem path to the external symbol storage, or NULL
+ // if not an import namespace.
+ //
+ // Import namespaces are read-only namespaces which are created
+ // for the importation of externally-declared symbols. One is
+ // initially created for each "mount point" specified by the user;
+ // whenever such a namespace is searched, it checks the external
+ // storage, and if the lookup succeeds, the symbol is loaded and
+ // added to the namespace. Failed lookups could be cached with a
+ // special BadSymbol or some such, as the imported namespace is
+ // assumed to be constant, but I don't think such an optimization
+ // is worthwhile, at least at this point.
+
+ StringRef path;
+
+ // Load a symbol from external storage, constructing the relevant type
+ // of object, and adding it to this namespace. Only called for
+ // import namespaces.
+
+ Symbol *load(const String *symname);
+
+ typedef map<StringRef, SymbolRef> tbl_type;
+ tbl_type tbl;
+
+ // Namespaces to search on lookup misses: first as unresolved
+ // name lists, then resolved to actual namespaces.
+ list<StrListRef> import_strs;
+ list<NameSpaceRef> imports;
+
+public:
+ // This is set in the destructor, so the contents of the namespace
+ // don't try to remove themselves from the namespace when destructed
+ // due to the destruction of map.
+ int dying;
+
+ // This is a counter for generating unique names for anonymous
+ // members of the namespace.
+ int anon;
+
+ NameSpace() : dying(0), anon(0)
+ {
+ }
+
+ virtual ~NameSpace()
+ {
+ dying = 1;
+ }
+
+ // Return a description of the type of namespace, for
+ // error messages.
+ virtual const char *description()
+ {
+ return "namespace";
+ }
+
+ virtual void output(const char *root);
+
+ typedef tbl_type::const_iterator const_iterator;
+ typedef tbl_type::value_type value_type;
+
+ // Derived classes can throw InvalidArgument if you give them
+ // a type of Symbol that they don't accept; see their comments
+ // for more details. Unfortunately, this cannot be done
+ // with static type-checking, as there are places that know
+ // they've been given a namespace that can accept a particular
+ // type of symbol, but they don't know exactly what kind of
+ // namespace it is. C++'s type system is not sufficient to
+ // express this (at least not while retaining any semblance
+ // of sanity).
+
+ // DuplicateSymbol is thrown if sym already exists in this namespace.
+ virtual void add(Symbol *sym, bool from_import)
+ {
+ // Import namespaces only accept symbols from the importer.
+ if (path && !from_import)
+ throw InvalidArgument();
+
+ if (path)
+ sym->external = true;
+
+ if (!sym->name.data)
+ BUG();
+
+ pair<const_iterator, bool> ret = tbl.insert(value_type(sym->name, sym));
+
+ if (ret.second)
+ sym->ns = this;
+ else {
+ throw DuplicateSymbol();
+ }
+ }
+
+ // Add the symbol to this namespace, handling duplicate symbols by
+ // printing an error and throwing a UserError(). This should not be
+ // done in the symbol's constructor, as the parent may not accept
+ // the symbol until it is fully constructed (the RTTI information
+ // changes, and in general partially constructed objects shouldn't
+ // be exposed to the rest of the system).
+
+ void add_user(Symbol *sym);
+
+ // Like add_user, but used by the import code. Duplicate
+ // symbols result in internal errors, and the add is done with
+ // from_import set to true. InvalidArgument results in an error
+ // message and a reraise as UserError.
+ //
+ // All conditions checked by the parent namespace's add() method
+ // (such as constness of data) must be satisfied prior to calling
+ // add_import(). On the other hand, add_import() must be called
+ // before any recursive importation is done which could
+ // conceivably try to import the current symbol (thus causing
+ // infinite recursion).
+
+ void add_import(Symbol *sym, const char *filename);
+
+ // SymbolNotFound is thrown if sym is not in this namespace.
+ virtual void del(Symbol *sym)
+ {
+ // NOTE(review): unconditional trace output on every removal —
+ // looks like leftover debugging; confirm whether it should
+ // stay.
+ fprintf(stderr, "Removing symbol %s\n",
+ sym->get_fq_name()->flatten()->c_str());
+
+ if (tbl.erase(sym->name) == 0)
+ throw SymbolNotFound();
+
+ sym->ns = NULL;
+ }
+
+private:
+ // Table-only lookup: no import search, no external load.
+ Symbol *lookup_noex_noimport(const String *symname)
+ {
+ const_iterator ret = tbl.find(symname);
+
+ if (ret != tbl.end())
+ return (*ret).second;
+
+ return NULL;
+ }
+
+public:
+ // Look up symname here (loading from external storage for import
+ // namespaces); returns NULL on failure or if the match is private
+ // and priv_ok is false.
+ Symbol *lookup_noex(const String *symname, bool priv_ok = false)
+ {
+ Symbol *ret = NameSpace::lookup_noex_noimport(symname);
+
+ if (path && !ret)
+ ret = load(symname);
+
+ if (ret && !priv_ok && ret->priv)
+ return NULL;
+
+ return ret;
+ }
+
+ // As lookup_noex, but throws SymbolNotFound instead of
+ // returning NULL.
+ Symbol *lookup(const String *symname, bool priv_ok = false)
+ {
+ Symbol *ret = lookup_noex(symname, priv_ok);
+
+ if (!ret)
+ throw SymbolNotFound();
+
+ return ret;
+ }
+
+ // Like lookup_noex, but also checks imported namespaces,
+ // and returns the namespace containing the match rather
+ // than the match itself.
+
+ NameSpace *search(const String *name, Symbol *exclude);
+
+ // Record a namespace (by name) to be searched on lookup misses;
+ // resolved later by lookup_imports().
+ void add_search(StrList *ns)
+ {
+ import_strs.push_back(ns);
+ }
+
+ // Return a string containing case information manglement.
+ // See input.cc for more information.
+
+ static const String *mangle(const String *name);
+
+ const String *get_path()
+ {
+ return path;
+ }
+
+ // Import all members of this namespace. A no-op if not an import
+ // namespace.
+ void import_all();
+
+ // As import_all, but also recursively applies to any sub-namespaces.
+ void import_all_recursive();
+
+ const_iterator begin()
+ {
+ return tbl.begin();
+ }
+
+ const_iterator end()
+ {
+ return tbl.end();
+ }
+
+ virtual void lookup_imports();
+
+ // The remaining passes simply recurse into every contained symbol.
+
+ virtual void lookup_chain()
+ {
+ for (const_iterator i = begin(); i != end(); ++i) {
+ Symbol *sym = (*i).second;
+ sym->lookup_chain();
+ }
+ }
+
+ virtual void lookup_misc()
+ {
+ for (const_iterator i = begin(); i != end(); ++i) {
+ Symbol *sym = (*i).second;
+ sym->lookup_misc();
+ }
+ }
+
+ virtual void final_analysis()
+ {
+ for (const_iterator i = begin(); i != end(); ++i) {
+ Symbol *sym = (*i).second;
+ sym->final_analysis();
+ }
+ }
+};
+
+
+extern NameSpaceRef cur_nspace;
+extern list<NameSpaceRef> nspace_stack;
+
+typedef std::vector<StringRef> StringVec;
+
+string *stringvec_to_path(StringVec &stringvec, const char *prepend);
+
+// lookup_sym and lookup_type throw UserError on user error
+// The context namespace is required for the proper
+// set of namespaces to be searched.
+
+Symbol *lookup_sym(NameSpace *topns, StrList *name, NameSpace *ctx,
+ Symbol *exclude = NULL);
+
+// Base class for symbols that are serialized to compiled .self
+// files: holds the header plus a pointer/length for the
+// type-specific payload struct.
+class Def {
+ const char *self; // Pointer to the type-specific struct
+ int self_len; // Length of the type-specific struct
+
+protected:
+ CompiledDefHeader hdr;
+
+ // sym is the symbol from which to get the path/name, and
+ // is_dir is true if it should be "name/.self" rather than
+ // "name".
+ //
+ // Fix: the declaration previously named both the first and
+ // third parameters "dir", which is a compile error (duplicate
+ // parameter name). Declaration parameter names are not part of
+ // the interface, so renaming the bool is safe for all callers
+ // and for the out-of-line definition.
+ void output_self(const char *dir, Symbol *sym, bool is_dir);
+
+public:
+ Def(const char *self, int self_len, CompiledDefHeader::Type type) :
+ self(self), self_len(self_len)
+ {
+ hdr.magic = CompiledDefHeader::magic_normal;
+ hdr.type = type;
+ }
+
+ virtual ~Def()
+ {
+ }
+
+ // Specific types may override this to output extra data
+ // to the .self file without having to reopen the file.
+ //
+ // Returns false on error.
+
+ virtual bool output_extra(FILE *f)
+ {
+ return true;
+ }
+};
+
+// Internal context struct used by input.cc to avoid passing
+// lots of parameters around
+struct ImportContext;
+
+// This represents an actual IDL namespace {}, rather than
+// a derived namespace such as a Struct or Interface.
+
+// An actual IDL "namespace { }" (as opposed to a derived namespace
+// such as a Struct or Interface), serialized as a NameSpace def.
+class UserNameSpace : public NameSpace, public Def {
+public:
+ CompiledNameSpace def;
+ StringRef mountpoint_name;
+
+ UserNameSpace(const String *name = NULL) :
+ Symbol(name),
+ Def((const char *)&def, sizeof(def), CompiledDefHeader::NameSpace)
+ {
+ // The serialized record stores the fully qualified name;
+ // cache it and record its length in the def payload.
+ mountpoint_name = get_fq_name()->flatten();
+ def.length = mountpoint_name->length();
+ }
+
+ virtual void output(const char *root);
+ bool output_extra(FILE *f);
+
+ // Register an import mount point rooted at the given path.
+ static void declare_import(const char *path);
+
+ static UserNameSpace *import(ImportContext &ctx);
+ virtual void output_lang(LangCallback *lcb, int arg = 0, void *arg2 = NULL);
+};
+
+typedef Ref<UserNameSpace> UserNameSpaceRef;
+extern UserNameSpaceRef toplevel, cdl_toplevel;
+extern UserNameSpace *output_ns;
+
+// Base class for all symbols that can be used as the type of a
+// datum or parameter.
+class Type : public virtual Symbol {
+public:
+ Type()
+ {
+ }
+
+ virtual ~Type()
+ {
+ }
+
+ // Default bitfield width for this type, or -1 if the type may
+ // not appear in a bitfield. Bitfield membership is opt-in:
+ // only types that explicitly support it override this.
+ virtual int get_default_bf_size()
+ {
+ return -1;
+ }
+};
+
+typedef Ref<Type> TypeRef;
+
+// ctx can be NULL if basic_types_only is true
+Type *lookup_type(StrList *sl, NameSpace *ctx, bool basic_types_only = false);
+
+class BasicType : public Type, public Def {
+public:
+ CompiledBasicType def;
+ bool complete;
+
+ BasicType(const String *name) :
+ Symbol(name),
+ Def((const char *)&def, sizeof(def), CompiledDefHeader::BasicType)
+ {
+ complete = false;
+ memset(&def, 0, sizeof(def));
+ }
+
+ void init(CompiledBasicType &DEF)
+ {
+ assert(!complete);
+
+ def = DEF;
+ complete = true;
+ }
+
+ static BasicType *declare(const String *name, NameSpace *parent,
+ CompiledBasicType &DEF)
+ {
+ BasicType *bt = new BasicType(name);
+ bt->init(DEF);
+
+ if (parent)
+ parent->add_user(bt);
+
+ return bt;
+ }
+
+ virtual void output(const char *root);
+
+ static BasicType *import(ImportContext &ctx);
+ virtual void output_lang(LangCallback *lcb, int arg = 0, void *arg2 = NULL);
+};
+
+// A CompiledBasicType describes an array iff either bound is
+// nonzero; both bounds zero means a plain scalar.
+static inline bool is_array(CompiledBasicType &bt)
+{
+ return !(bt.array.bounds[0] == 0 && bt.array.bounds[1] == 0);
+}
+
+// Pointer-taking convenience overload.
+static inline bool is_array(CompiledBasicType *bt)
+{
+ return is_array(*bt);
+}
+
+typedef Ref<Datum> DatumRef;
+
+// An array bound specification attached to a datum or parameter.
+// Bounds may be literal constants or symbolic (named) constants that
+// are resolved during final analysis.
+class Array : public RefCountable<Array>
+{
+ // Namespace in which symbolic bounds are resolved.
+ NameSpace *lookup_ctx;
+ DatumRef datums[2];
+
+ // lower is [0], upper is [1]
+ StrListRef dcons[2];
+ Con cons[2];
+
+ // Strings for error reporting on each constant. If the constant
+ // is symbolic, then this is the fully qualified symbol name.
+ // Otherwise, it is the numerical value converted to a string. In
+ // each case, the file/line shall correspond to where the array
+ // bound was specified.
+
+ StringRef strs[2];
+
+public:
+ // ca is not valid until after final_analysis() is called.
+ CompiledArray ca;
+
+ Array(NameSpace *LOOKUP_CTX);
+ void set_bound(Con &con, int bound);
+ void final_analysis();
+
+ void set_unbounded();
+};
+
+typedef Ref<Array> ArrayRef;
+
+// A data member or named constant: a name bound to a type (named or
+// anonymous basic type), optional array bounds, and an optional
+// constant initializer. Resolution happens across the lookup_chain /
+// lookup_misc / final_analysis passes.
+class Datum : public Symbol, public Def {
+ StrListRef type_name;
+ SymbolRef type_sym;
+ StringRef type_fq_name;
+
+ // Name, symbol, and resolved datum of a symbolic constant
+ // initializer, if any.
+ StrListRef const_val_name;
+ SymbolRef const_val_sym;
+ DatumRef const_val;
+
+ bool basic; // Datum is of a BasicType
+ bool complete;
+ bool const_init; // Datum's constant has been initialized; this is
+ // true after a successful verify_const().
+ CompiledBasicType *cbt;
+
+ ArrayRef array;
+
+ // Guard against revisiting this datum in resolve_constant_chain();
+ // compared against ::traversal.
+ int chain_traversed;
+
+ // Recursively retrieve the actual value of a const datum
+ // initialized with another named const datum. Returns
+ // the "end" of an infinite loop, if one is found. Once
+ // the full infinite loop has been printed, UserError is
+ // thrown.
+
+ Datum *resolve_constant_chain();
+
+ void init_const_early(Con *con);
+
+ // Adopt an anonymous basic type: copy CBT into the def payload
+ // and clear any named-type state.
+ void use_anon_type(const CompiledBasicType &CBT)
+ {
+ def.basictype = CBT;
+ cbt = &def.basictype;
+ basic = true;
+ type = NULL;
+ def.type.length = 0;
+ }
+
+ void process_type();
+
+ void set_array(Array *ARRAY)
+ {
+ if (ARRAY)
+ array = ARRAY;
+ }
+
+public:
+ CompiledDatum def;
+ TypeRef type;
+
+ int con_type; // Used to store the TOK_[IUF]CON of the Con struct
+ // for type checking once the type is known.
+
+ Datum(const String *name) :
+ Symbol(name),
+ Def((const char *)&def, sizeof(def), CompiledDefHeader::Datum)
+ {
+ complete = false;
+ const_init = false;
+ chain_traversed = 0;
+ memset(&def, 0, sizeof(def));
+ }
+
+ void init(StrList *type, Array *ARRAY, Con *con = NULL);
+ void init(CompiledBasicType &cbt, Array *ARRAY, Con *con = NULL);
+
+ // Construct, initialize with a named type, and register with the
+ // parent namespace (which must be non-NULL).
+ static Datum *declare(const String *name, NameSpace *parent,
+ StrList *type, Array *ARRAY,
+ Con *con = NULL)
+ {
+ assert(parent);
+
+ Datum *d = new Datum(name);
+ d->init(type, ARRAY, con);
+
+ parent->add_user(d);
+ return d;
+ }
+
+ // As above, but with an anonymous basic type.
+ static Datum *declare(const String *name, NameSpace *parent,
+ CompiledBasicType &type,
+ Array *ARRAY, Con *con = NULL)
+ {
+ assert(parent);
+
+ Datum *d = new Datum(name);
+ d->init(type, ARRAY, con);
+
+ parent->add_user(d);
+ return d;
+ }
+
+ void set_inline()
+ {
+ def.flags.field.Inline = 1;
+ }
+
+ bool is_inline()
+ {
+ return def.flags.field.Inline;
+ }
+
+ void set_immutable()
+ {
+ def.flags.field.Immutable = 1;
+ }
+
+ bool is_immutable()
+ {
+ return def.flags.field.Immutable;
+ }
+
+ // Returns true if the constant was acceptable, false otherwise (an
+ // error is also output to the user in this case).
+
+ bool verify_const();
+
+ // Resolve the symbolic names recorded by init(): the constant
+ // initializer (if symbolic) and the type name.
+ virtual void lookup_chain()
+ {
+ assert(complete);
+
+ if (const_val_name)
+ const_val_sym = lookup_sym(toplevel, const_val_name, get_ns());
+
+ if (type_name) {
+ assert(!basic);
+ // Try basic types first, then a general symbol lookup.
+ type_sym = lookup_type(type_name, get_ns(), true);
+ if (!type_sym)
+ type_sym = lookup_sym(toplevel, type_name, get_ns());
+ } else {
+ assert(basic);
+ }
+ }
+
+ // For const datums, chase chains of symbolic constants to a
+ // concrete value; for others, resolve the type.
+ virtual void lookup_misc()
+ {
+ if (def.flags.field.Const) {
+ if (!const_init) {
+ assert(def.flags.field.Const);
+
+ traversal++;
+ Datum *d = resolve_constant_chain();
+ assert(!d);
+ }
+
+ assert(const_init);
+ } else {
+ process_type();
+ }
+ }
+
+ virtual void final_analysis();
+
+ virtual void output(const char *root);
+ bool output_extra(FILE *f);
+
+ static Datum *import(ImportContext &ctx);
+ virtual void output_lang(LangCallback *lcb, int arg = 0, void *arg2 = NULL);
+
+ // Return the constant's value as unsigned; err_str supplies the
+ // file/line used for the error message if this is not a const
+ // datum (in which case UserError is thrown).
+ uint64_t get_ucon(const String *err_str)
+ {
+ if (!def.flags.field.Const) {
+ yyerrorfl(err_str->file, err_str->line,
+ "\"%s\" is not a const Datum.\n",
+ get_fq_name()->flatten()->c_str());
+ throw UserError();
+ }
+
+ assert(const_init);
+ return def.ucon;
+ }
+
+ bool is_array()
+ {
+ return ::is_array(def.basictype);
+ }
+};
+
+template<typename T>
+bool output_list(T *sym, FILE *f);
+
+// A bitfield type: a namespace whose members are packed into a
+// fixed-width (1..64 bit) integer.
+class BitField : public NameSpace, public Type, public Def {
+ list<DatumRef> entries;
+
+ void add_elem(Datum *d);
+
+public:
+ CompiledBitField def;
+ // Next free bit position while members are being added.
+ int cur_pos;
+
+ BitField(const String *name) :
+ Symbol(name),
+ Def((const char *)&def, sizeof(def), CompiledDefHeader::BitField)
+ {
+ memset(&def, 0, sizeof(def));
+ }
+
+ // Validate and record the bitfield width, clamping invalid
+ // values (after reporting an error) so later passes can proceed.
+ // NOTE(review): the parent parameter is unused here —
+ // registration happens in declare() via add_user(); confirm it
+ // isn't needed.
+ void init(int bits, NameSpace *parent)
+ {
+ if (bits < 0 || bits > 64) {
+ yyerrorf("\"%s\" has invalid bitfield size %d",
+ name->c_str(), bits);
+
+ bits = bits < 0 ? 0 : 64;
+ }
+
+ def.bits = bits;
+ }
+
+ static BitField *declare(const String *name, NameSpace *parent,
+ int bits)
+ {
+ assert(parent);
+
+ BitField *bf = new BitField(name);
+ bf->init(bits, parent);
+
+ parent->add_user(bf);
+ return bf;
+ }
+
+ // Only integral Datums, Enums, and BitFields can be added.
+
+ void add(Symbol *sym, bool from_import);
+
+ virtual const char *description()
+ {
+ return "bitfield";
+ }
+
+ virtual void lookup_misc()
+ {
+ NameSpace::lookup_misc();
+ }
+
+ virtual void final_analysis()
+ {
+ // FIXME: check total size of elements
+
+ NameSpace::final_analysis();
+ }
+
+ // A bitfield's default width inside another bitfield is its own
+ // declared width.
+ int get_default_bf_size()
+ {
+ return def.bits;
+ }
+
+ virtual void output(const char *root);
+ bool output_extra(FILE *f);
+
+ static BitField *import(ImportContext &ctx);
+ virtual void output_lang(LangCallback *lcb, int arg = 0, void *arg2 = NULL);
+
+ typedef list<DatumRef>::const_iterator entries_iterator;
+ typedef list<DatumRef>::const_reverse_iterator entries_reverse_iterator;
+
+ entries_iterator entries_begin()
+ {
+ return entries.begin();
+ }
+
+ entries_iterator entries_end()
+ {
+ return entries.end();
+ }
+
+ entries_reverse_iterator entries_rbegin()
+ {
+ return entries.rbegin();
+ }
+
+ entries_reverse_iterator entries_rend()
+ {
+ return entries.rend();
+ }
+};
+
+class Struct;
+typedef Ref<Struct> StructRef;
+extern Struct *System_VStruct;
+
+// FIXME: typedefed superstructs
+class Struct : public NameSpace, public Type, public Def {
+ list<DatumRef> entries;
+ StructRef super;
+ SymbolRef supersym;
+ StrListRef supername;
+ bool attrs_resolved;
+
+ void add_elem(Datum *d);
+
+ void resolve_attrs()
+ {
+ if (attrs_resolved)
+ return;
+
+ if (super && !super->attrs_resolved)
+ super->resolve_attrs();
+
+ if (super && super->def.flags.field.Virtual)
+ def.flags.field.Virtual = 1;
+
+ attrs_resolved = true;
+ }
+
+public:
+ CompiledStruct def;
+
+ // This is not maintained by the generic code, but can be
+ // used by language bindings to cache the result of the
+ // summation.
+
+ int chainlen;
+
+ Struct(const String *name) :
+ Symbol(name),
+ Def((const char *)&def, sizeof(def), CompiledDefHeader::Struct)
+ {
+ memset(&def, 0, sizeof(def));
+ attrs_resolved = 0;
+ }
+
+ void init(StrList *SUPERNAME)
+ {
+ supername = SUPERNAME;
+ }
+
+ static Struct *declare(const String *name, NameSpace *parent,
+ StrList *SUPERNAME)
+ {
+ assert(parent);
+
+ Struct *st = new Struct(name);
+ st->init(SUPERNAME);
+
+ parent->add_user(st);
+ return st;
+ }
+
+ void set_virtual()
+ {
+ def.flags.field.Virtual = 1;
+ }
+
+ void set_inline()
+ {
+ def.flags.field.Inline = 1;
+ }
+
+ bool is_virtual()
+ {
+ return def.flags.field.Virtual;
+ }
+
+ bool is_inline()
+ {
+ return def.flags.field.Inline;
+ }
+
+ // Only Datums and Types can be added.
+
+ void add(Symbol *sym, bool from_import);
+
+ virtual const char *description()
+ {
+ return "struct";
+ }
+
+ Struct *get_super()
+ {
+ assert(current_pass >= 4);
+ return super;
+ }
+
+ virtual void lookup_chain()
+ {
+ if (supername) {
+ supersym = lookup_sym(toplevel, supername, get_ns());
+ assert(supersym);
+ }
+
+ NameSpace::lookup_chain();
+ }
+
+private:
+ void lookup_super()
+ {
+ if (supersym && !super) {
+ super = dynamic_cast<Struct *>(supersym->get_concrete_sym());
+
+ if (!super) {
+ const String *str = supername->back();
+ yyerrorfl(str->file, str->line,
+ "\"%s\" is not a struct.",
+ supersym->get_fq_name()->flatten()->c_str());
+ }
+
+ def.flags.field.Super = 1;
+ super->lookup_super();
+
+ if (super->is_virtual())
+ set_virtual();
+ }
+
+ if (is_virtual() && !supersym && !super) {
+ assert(System_VStruct);
+ if (this != System_VStruct) {
+ def.flags.field.Super = 1;
+ super = System_VStruct;
+ }
+ }
+ }
+
+public:
+ virtual void lookup_misc()
+ {
+ lookup_super();
+
+ if (is_virtual() && def.guid[0] == 0 && def.guid[1] == 0)
+ yyerrorfl(name->file, name->line,
+ "Virtual struct \"%s\" is missing a GUID.",
+ get_fq_name()->flatten()->c_str());
+
+ NameSpace::lookup_misc();
+ }
+
+ virtual void final_analysis()
+ {
+ // FIXME: check for infinite loops in struct inheritance
+
+ resolve_attrs();
+ NameSpace::final_analysis();
+ }
+
+ virtual void output(const char *root);
+ bool output_extra(FILE *f);
+
+ static Struct *import(ImportContext &ctx);
+ virtual void output_lang(LangCallback *lcb, int arg = 0, void *arg2 = NULL);
+
+ typedef list<DatumRef>::const_iterator entries_iterator;
+
+ entries_iterator entries_begin()
+ {
+ return entries.begin();
+ }
+
+ entries_iterator entries_end()
+ {
+ return entries.end();
+ }
+
+ void set_guid(uint64_t guid[2])
+ {
+ if (def.guid[0] || def.guid[1])
+ yyerrorf("\"%s\" already has a GUID.",
+ get_fq_name()->flatten()->c_str());
+
+ def.guid[0] = guid[0];
+ def.guid[1] = guid[1];
+ }
+};
+
+// A method parameter: a name, a type (named or anonymous basic
+// type), direction/inline flags, and optional array bounds.
+class Param : public Symbol, public Def {
+ StrListRef type_name;
+ StringRef type_fq_name;
+
+ bool basic; // Datum is of a BasicType
+ bool complete;
+
+ ArrayRef array;
+
+ // Record a reference to a named (typedef'd) type in the def
+ // payload. Reads the already-resolved `type` member.
+ void use_named_type(BasicType *bt)
+ {
+ assert(!bt || bt->def.flags.field.TypeDef);
+
+ basic = false;
+
+ type_fq_name = type->get_fq_name()->flatten();
+ def.type.length = type_fq_name->length();
+ }
+
+ // Copy an anonymous basic type directly into the def payload.
+ void use_anon_type(const CompiledBasicType &cbt)
+ {
+ def.basictype = cbt;
+ basic = true;
+ type = NULL;
+ }
+
+ void set_array(Array *ARRAY)
+ {
+ if (ARRAY)
+ array = ARRAY;
+ }
+
+public:
+ CompiledParam def;
+ TypeRef type;
+
+ Param(const String *name) :
+ Symbol(name),
+ Def((const char *)&def, sizeof(def), CompiledDefHeader::Param)
+ {
+ memset(&def, 0, sizeof(def));
+ }
+
+ void init(StrList *TYPE, CompiledParam::Flags flags, Array *ARRAY)
+ {
+ type_name = TYPE;
+ def.flags = flags;
+ set_array(ARRAY);
+ }
+
+ static Param *declare(const String *name, NameSpace *parent,
+ StrList *TYPE, CompiledParam::Flags flags,
+ Array *ARRAY);
+
+ virtual void lookup_misc()
+ {
+ type = lookup_type(type_name, get_ns());
+ }
+
+ // Decide between anonymous and named type encodings, validate
+ // the inline flag, and finalize array bounds.
+ // NOTE(review): assumes lookup_misc() succeeded — `type` is
+ // dereferenced without a NULL check.
+ virtual void final_analysis()
+ {
+ BasicType *bt = dynamic_cast<BasicType *>(type->get_concrete_sym());
+
+ if (bt && !bt->def.flags.field.TypeDef) {
+ use_anon_type(bt->def);
+ } else {
+ use_named_type(bt);
+
+ Struct *str = dynamic_cast<Struct *>(*type);
+ if (str && str->is_inline())
+ set_inline();
+
+ if (!str && is_inline()) {
+ yyerrorfl(name->file, name->line,
+ "\"%s\" is static but not a struct.",
+ get_fq_name()->flatten()->c_str());
+ }
+ }
+
+ if (array) {
+ array->final_analysis();
+ def.basictype.array = array->ca;
+ } else {
+ def.basictype.array.bounds[0] = 0;
+ def.basictype.array.bounds[1] = 0;
+ }
+ }
+
+ void set_inline()
+ {
+ def.flags.field.Inline = 1;
+ }
+
+ bool is_inline()
+ {
+ return def.flags.field.Inline;
+ }
+
+ bool is_in()
+ {
+ return def.flags.field.In;
+ }
+
+ bool is_out()
+ {
+ return def.flags.field.Out;
+ }
+
+ virtual void output(const char *root);
+ bool output_extra(FILE *f);
+
+ static Param *import(ImportContext &ctx);
+ virtual void output_lang(LangCallback *lcb, int arg = 0, void *arg2 = NULL);
+
+ bool is_array()
+ {
+ return ::is_array(def.basictype);
+ }
+};
+
+typedef Ref<Param> ParamRef;
+
+// An interface method: a namespace whose members are its Params.
+class Method : public NameSpace, public Def {
+ list<ParamRef> entries;
+
+ void add_elem(Param *p);
+
+public:
+ CompiledMethod def;
+
+ Method(const String *name) :
+ Symbol(name),
+ Def((const char *)&def, sizeof(def), CompiledDefHeader::Method)
+ {
+ memset(&def, 0, sizeof(def));
+ }
+
+ void set_async()
+ {
+ def.flags.field.Async = 1;
+ }
+
+ bool is_async()
+ {
+ return def.flags.field.Async;
+ }
+
+ static Method *declare(const String *name, NameSpace *parent);
+
+ // Only Params can be added (enforced by the out-of-line add()).
+ void add(Symbol *sym, bool from_import);
+
+ virtual const char *description()
+ {
+ return "method";
+ }
+
+ virtual void output(const char *root);
+ bool output_extra(FILE *f);
+
+ static Method *import(ImportContext &ctx);
+ virtual void output_lang(LangCallback *lcb, int arg = 0, void *arg2 = NULL);
+
+ typedef list<ParamRef>::const_iterator entries_iterator;
+
+ entries_iterator entries_begin()
+ {
+ return entries.begin();
+ }
+
+ entries_iterator entries_end()
+ {
+ return entries.end();
+ }
+};
+
+typedef Ref<Method> MethodRef;
+
+class Interface;
+typedef Ref<Interface> InterfaceRef;
+
+extern Interface *System_Object;
+
+// FIXME: typedefed superinterfaces
+// An interface type: a namespace of Methods, nested Types, and const
+// Datums, with multiple inheritance. (Class continues beyond this
+// chunk.)
+class Interface : public NameSpace, public Type, public Def {
+ list<MethodRef> methods;
+ list<InterfaceRef> supers;
+ IDListRef supernames;
+
+ void add_elem(Method *m);
+
+ // This is like Symbol::traversed[], but used internally by the
+ // for_each_super function to ensure that common ancestors are only
+ // visited once.
+
+ int traversed_all_supers;
+ static int all_supers_traversal;
+
+public:
+ typedef void (*callback)(Interface *i, void *arg);
+
+private:
+ // Depth-first walk over the (possibly diamond-shaped) super
+ // graph, invoking cb exactly once per distinct ancestor; the
+ // traversed_all_supers stamp skips already-visited nodes.
+ template<callback cb>
+ void for_each_super_internal(void *arg)
+ {
+ for (supers_iterator i = supers_begin(); i != supers_end(); ++i) {
+ Interface *iface = *i;
+
+ if (iface->traversed_all_supers < all_supers_traversal) {
+ iface->traversed_all_supers = all_supers_traversal;
+ cb(iface, arg);
+ iface->for_each_super_internal<cb>(arg);
+ }
+ }
+ }
+
+ // All interfaces in the map and vector are supers of this
+ // interface, and thus retained that way, so plain pointers
+ // can be used here.
+
+ typedef map<Interface *, int> chain_map_type;
+ typedef chain_map_type::value_type chain_valtype;
+ typedef chain_map_type::const_iterator chain_iter;
+
+ chain_map_type super_to_chain_map;
+
+private:
+ int num_chains;
+ vector<Interface *> chain_heads;
+
+public:
+ // Map a super-interface to the index of the chain it was
+ // assigned to by sort_chains(); returns -1 (rather than
+ // asserting) only when must_find_it is false.
+ int super_to_chain(Interface *iface, bool must_find_it = true)
+ {
+ chain_iter ret = super_to_chain_map.find(iface);
+
+ if (ret == super_to_chain_map.end()) {
+ assert(!must_find_it);
+ return -1;
+ }
+
+ return (*ret).second;
+ }
+
+ Interface *get_chain_head(int chain)
+ {
+ return chain_heads[chain];
+ }
+
+ int get_num_chains()
+ {
+ return num_chains;
+ }
+
+private:
+ // Record the chain assignment for iface; it must not already
+ // have one.
+ void set_chain(Interface *iface, int chain)
+ {
+ pair<chain_iter, bool> ret =
+ super_to_chain_map.insert(chain_valtype(iface, chain));
+ assert(ret.second);
+ }
+
+ // This is the inner depth-first search, which terminates
+ // at each level upon finding that the node it had previously
+ // recursed into found an unchained node.
+
+ // Follow first-super links from iface, assigning each as-yet
+ // unchained ancestor to the given chain; iface becomes the
+ // chain's head.
+ void pick_chain(Interface *iface, int chain)
+ {
+ assert(super_to_chain(iface, false) == -1);
+ chain_heads.push_back(iface);
+
+ do {
+ set_chain(iface, chain);
+
+ if (iface->supers.empty())
+ break;
+
+ iface = iface->supers.front();
+ } while (super_to_chain(iface, false) == -1);
+ }
+
+ // This is the outer breadth-first-search, making sure every
+ // super is assigned to a chain.
+
+ void sort_chains()
+ {
+ list<Interface *> bfs;
+ num_chains = 0;
+
+ bfs.push_back(this);
+
+ while (!bfs.empty()) {
+ Interface *iface = bfs.front();
+ bfs.pop_front();
+
+ for (supers_iterator i = iface->supers_begin();
+ i != iface->supers_end(); ++i)
+ bfs.push_back(*i);
+
+ if (super_to_chain(iface, false) == -1)
+ pick_chain(iface, num_chains++);
+ }
+ }
+
+public:
+ // Append a direct superinterface.  Do not call after lookup_misc;
+ // pass 1 only collects names, so adding supers there is an error.
+ void add_super(Interface *i)
+ {
+ assert(current_pass != 1);
+
+ supers.push_back(i);
+ def.num_supers++;
+ }
+
+ CompiledInterface def;
+
+ Interface(const String *name) :
+ Symbol(name),
+ Def((const char *)&def, sizeof(def), CompiledDefHeader::Interface)
+ {
+ // def is emitted as raw bytes; zero it so the output is
+ // deterministic.  The GUID is filled in later by set_guid().
+ memset(&def, 0, sizeof(def));
+ traversed_all_supers = 0;
+ }
+
+ // Stash the textual (not yet resolved) super-interface names;
+ // resolution to Interface pointers happens in lookup_misc().
+ void init(IDList *SUPERNAMES)
+ {
+ supernames = SUPERNAMES;
+ }
+
+ // Create an Interface named "name" under "parent" and register it
+ // as a user-declared (non-imported) symbol.
+ static Interface *declare(const String *name, NameSpace *parent,
+ IDList *SUPERNAMES)
+ {
+ assert(parent);
+
+ Interface *i = new Interface(name);
+ i->init(SUPERNAMES);
+
+ parent->add_user(i);
+ return i;
+ }
+
+ // Only Methods, Types, and const BasicType Datums can be added.
+
+ void add(Symbol *sym, bool from_import);
+
+ virtual const char *description()
+ {
+ return "interface";
+ }
+
+private:
+ // Every interface other than System.Object itself implicitly
+ // inherits from System.Object when no explicit supers were given.
+ void add_object_super()
+ {
+ assert(System_Object);
+ if (this != System_Object && supers.empty())
+ add_super(System_Object);
+ }
+
+public:
+ virtual void lookup_misc()
+ {
+ // A missing GUID is reported but does not abort this pass, so
+ // that further diagnostics can still be collected.
+ if (def.guid[0] == 0 && def.guid[1] == 0)
+ yyerrorfl(name->file, name->line,
+ "Interface \"%s\" is missing a GUID.",
+ get_fq_name()->flatten()->c_str());
+
+ if (supernames) {
+ // Resolve each textual super name to a concrete Interface;
+ // anything else (struct, enum, datum, ...) is fatal.
+ for (IDList::iterator i = supernames->begin();
+ i != supernames->end(); ++i)
+ {
+ Symbol *sym = lookup_sym(toplevel, *i, get_ns());
+ Interface *iface =
+ dynamic_cast<Interface *>(sym->get_concrete_sym());
+
+ if (!iface) {
+ const String *str = (*i)->back();
+ yyerrorfl(str->file, str->line,
+ "\"%s\" is not an interface.\n",
+ sym->get_fq_name()->flatten()->c_str());
+
+ throw UserError();
+ }
+
+ add_super(iface);
+ }
+ }
+
+ add_object_super();
+ NameSpace::lookup_misc();
+ }
+
+ virtual void final_analysis()
+ {
+ // FIXME: check for infinite loops in inheritance
+
+ // Chains must be assigned before output code uses
+ // super_to_chain() / get_chain_head().
+ sort_chains();
+ NameSpace::final_analysis();
+ }
+
+ virtual void output(const char *root);
+ bool output_extra(FILE *f);
+
+ static Interface *import(ImportContext &ctx);
+ virtual void output_lang(LangCallback *lcb, int arg = 0, void *arg2 = NULL);
+
+ typedef list<MethodRef>::const_iterator methods_iterator;
+ typedef list<InterfaceRef>::const_iterator supers_iterator;
+
+ // Iterating direct supers is only meaningful after pass 1, once
+ // the super list has been resolved.
+ supers_iterator supers_begin()
+ {
+ assert(current_pass != 1);
+ return supers.begin();
+ }
+
+ supers_iterator supers_end()
+ {
+ return supers.end();
+ }
+
+ bool supers_empty()
+ {
+ assert(current_pass != 1);
+ return supers.empty();
+ }
+
+ methods_iterator methods_begin()
+ {
+ return methods.begin();
+ }
+
+ methods_iterator methods_end()
+ {
+ return methods.end();
+ }
+
+ // Invoke "cb" once for every transitive super (not including this
+ // interface itself).  Bumps the traversal generation, so repeated
+ // calls are independent of each other.
+ template<callback cb>
+ void for_each_super(void *arg)
+ {
+ assert(current_pass >= 4);
+
+ all_supers_traversal++;
+ for_each_super_internal<cb>(arg);
+ }
+
+ // Performs the same work as lookup_misc()/final_analysis() do for
+ // ordinary interfaces; presumably used for class interfaces built
+ // outside the normal pass sequence -- TODO confirm against caller.
+ void finalize_class_iface()
+ {
+ add_object_super();
+ sort_chains();
+ }
+
+ void set_guid(uint64_t guid[2])
+ {
+ if (def.guid[0] || def.guid[1])
+ yyerrorf("\"%s\" already has a GUID.",
+ get_fq_name()->flatten()->c_str());
+
+ // Note: the new value is stored even after the duplicate
+ // diagnostic above, so the last declaration wins.
+ def.guid[0] = guid[0];
+ def.guid[1] = guid[1];
+ }
+};
+
+// A reference-counted list of interface references.
+class IFaceList : public list<InterfaceRef>,
+ public RefCountable<IFaceList> {};
+
+// An enumeration type: a namespace whose members are const unsigned
+// integer Datums sharing the enum's bit width.
+class Enum : public NameSpace, public Type, public Def {
+ list<DatumRef> entries;
+
+ void add_elem(Datum *d);
+
+public:
+ // Value assigned to the next declared enumerator; starts at 0 and
+ // is advanced by the parser for each entry.
+ unsigned int next_val;
+ CompiledEnum def;
+
+ Enum(const String *name) :
+ Symbol(name),
+ Def((const char *)&def, sizeof(def), CompiledDefHeader::Enum),
+ next_val(0)
+ {
+ memset(&def, 0, sizeof(def));
+ }
+
+ // Validate and record the storage size in bits.  Out-of-range
+ // sizes are diagnosed and clamped into [0, 64] so parsing can
+ // continue.
+ void init(int bits)
+ {
+ if (bits < 0 || bits > 64) {
+ yyerrorf("\"%s\" has invalid enum size %d",
+ name->c_str(), bits);
+
+ bits = bits < 0 ? 0 : 64;
+ }
+
+ def.bits = bits;
+ }
+
+ static Enum *declare(const String *name, NameSpace *parent,
+ int bits)
+ {
+ assert(parent);
+
+ Enum *e = new Enum(name);
+ e->init(bits);
+
+ parent->add_user(e);
+ return e;
+ }
+
+ // Only const unsigned integer BasicType Datums are allowed.
+
+ void add(Symbol *sym, bool from_import);
+
+ virtual const char *description()
+ {
+ return "enumeration";
+ }
+
+ // Default width when a datum of this type appears in a bitfield.
+ int get_default_bf_size()
+ {
+ return def.bits;
+ }
+
+ virtual void output(const char *root);
+ bool output_extra(FILE *f);
+
+ static Enum *import(ImportContext &ctx);
+ virtual void output_lang(LangCallback *lcb, int arg = 0, void *arg2 = NULL);
+
+ typedef list<DatumRef>::const_iterator entries_iterator;
+
+ entries_iterator entries_begin()
+ {
+ return entries.begin();
+ }
+
+ entries_iterator entries_end()
+ {
+ return entries.end();
+ }
+};
+
+// A named alias (symbolic reference) to another symbol.  Aliases may
+// chain through other aliases; resolve_chain() follows the chain with
+// cycle detection.
+class Alias : public Symbol, public Def {
+ // Set once resolution of this alias has started; seeing it set
+ // again during resolution means the chain loops.
+ bool lookup_begun;
+
+ // Thrown while unwinding an alias cycle.  "end" identifies the
+ // alias where the cycle was detected, so the unwind can stop
+ // once the loop has been fully reported.
+ struct Cycle {
+ Alias *end;
+
+ Cycle(Alias *END) : end(END)
+ {
+ }
+ };
+
+public:
+ CompiledAlias def;
+
+ SymbolRef real_sym;
+ StringRef sym_fq_name;
+ StrListRef sym_name;
+
+ Alias(const String *name) :
+ Symbol(name),
+ Def((const char *)&def, sizeof(def), CompiledDefHeader::Alias)
+ {
+ memset(&def, 0, sizeof(def));
+ lookup_begun = false;
+ }
+
+ void init(StrList *symname, bool is_private)
+ {
+ sym_name = symname;
+ priv = is_private;
+ }
+
+ static Alias *declare(const String *name, NameSpace *parent,
+ StrList *symname, bool is_private = false)
+ {
+ assert(parent);
+ Alias *a = new Alias(name);
+ a->init(symname, is_private);
+
+ parent->add_user(a);
+ return a;
+ }
+
+ // Resolve this alias's target, following chained aliases.  On a
+ // cycle, reports one line per chain member while unwinding, then
+ // converts to UserError at the alias that closed the loop.
+ void resolve_chain()
+ {
+ if (!real_sym) {
+ if (lookup_begun) {
+ yyerrorfl(name->file, name->line,
+ "Alias loop defining \"%s\"",
+ get_fq_name()->flatten()->c_str());
+
+ throw Cycle(this);
+ }
+
+ lookup_begun = true;
+
+ try {
+ real_sym = lookup_sym(toplevel, sym_name, get_ns(), this);
+ }
+
+ catch (Cycle &c) {
+ yyerrorfl(name->file, name->line, " ...referenced by \"%s\"",
+ get_fq_name()->flatten()->c_str());
+
+ if (c.end == this)
+ throw UserError();
+
+ throw c;
+ }
+ }
+ }
+
+ // A plain alias is always transparent: resolve and delegate.
+ virtual Symbol *get_concrete_sym(bool follow_typedefs = true)
+ {
+ resolve_chain();
+ return real_sym->get_concrete_sym(follow_typedefs);
+ }
+
+ virtual void lookup_chain()
+ {
+ get_concrete_sym(true);
+ }
+
+ virtual void lookup_misc()
+ {
+ // Collapse the chain (but stop at typedefs) and cache the
+ // flattened fully-qualified target name for output.
+ real_sym = real_sym->get_concrete_sym(false);
+ sym_fq_name = real_sym->get_fq_name()->flatten();
+
+ def.length = sym_fq_name->length();
+ }
+
+ virtual void output(const char *root);
+ bool output_extra(FILE *f);
+
+ static Alias *import(ImportContext &ctx);
+ virtual void output_lang(LangCallback *lcb, int arg = 0, void *arg2 = NULL);
+};
+
+// A typedef is an alias that acts as a distinct type: unlike a plain
+// Alias it is only looked through when follow_typedefs is set.
+class TypeDef : public Alias {
+public:
+ TypeDef(const String *name) : Alias(name)
+ {
+ // Reuse the Alias on-disk representation, but tag the header
+ // so importers can tell the two apart.
+ memset(&def, 0, sizeof(def));
+ hdr.type = CompiledDefHeader::TypeDef;
+ }
+
+ static TypeDef *declare(const String *name, NameSpace *parent,
+ StrList *symname)
+ {
+ assert(parent);
+ TypeDef *td = new TypeDef(name);
+ td->init(symname, false);
+
+ parent->add_user(td);
+ return td;
+ }
+
+ // Remain opaque unless the caller explicitly follows typedefs.
+ virtual Symbol *get_concrete_sym(bool follow_typedefs = true)
+ {
+ if (follow_typedefs) {
+ resolve_chain();
+ return real_sym->get_concrete_sym(follow_typedefs);
+ }
+
+ return this;
+ }
+
+ static TypeDef *import(ImportContext &ctx);
+ virtual void output_lang(LangCallback *lcb, int arg = 0, void *arg2 = NULL);
+};
+
+NameSpace *add_nspace(StrList *name, bool push);
+void pop_nspace();
+
+// Declare an instance of "type" in "ns" for each element of "ids".
+// This function will report any errors, but not throw UserError.
+
+void declare_data(NameSpace *ns, StrList *ids, StrList *type,
+ Array *array, StrList *attr);
+void declare_aliases(NameSpace *ns, StrList *ids, StrList *type,
+ bool is_typedef);
+void declare_basictypes(NameSpace *ns, StrList *ids,
+ BasicType *type, bool is_typedef);
+
+// You'd think they'd have standard functions to do this these days.
+// All I could find that come close are the network-byte-order
+// functions, and they're no-ops on big-endian machines.
+
+// Byte-reverse a 32-bit value when "swap" is set (cross-endian
+// import); otherwise pass it through unchanged.
+static inline uint32_t swap32(uint32_t in, bool swap)
+{
+ if (swap)
+ return ((in & 0x000000ff) << 24) |
+ ((in & 0x0000ff00) << 8) |
+ ((in & 0x00ff0000) >> 8) |
+ ((in & 0xff000000) >> 24);
+
+ return in;
+}
+
+// Byte-reverse a 64-bit value by byte-swapping each 32-bit half and
+// exchanging the halves.
+static inline uint64_t swap64(uint64_t in, bool swap)
+{
+ if (swap)
+ return (((uint64_t)swap32((uint32_t)in, true)) << 32) |
+ swap32((uint32_t)(in >> 32), true);
+
+ return in;
+}
+
+// Minimal RAII wrapper around a stdio FILE *: closes the handle (if
+// any) on destruction and converts implicitly to FILE * for direct
+// use with stdio calls.
+//
+// NOTE(review): no copy constructor/assignment is declared, so
+// copying a File would cause a double fclose(); confirm instances
+// are only passed by reference.
+struct File {
+ FILE *f;
+
+ File()
+ {
+ f = NULL;
+ }
+
+ File(FILE *F)
+ {
+ f = F;
+ }
+
+ // Adopt a new handle.  Any previously-owned handle is NOT closed
+ // here; the caller is responsible for it.
+ File *operator =(FILE *F)
+ {
+ f = F;
+ return this;
+ }
+
+ ~File()
+ {
+ if (f)
+ fclose(f);
+ }
+
+ operator FILE *()
+ {
+ return f;
+ }
+};
+
+// Verify that a prospective new import (or output namespace)
+// does not overlap an existing import. Returns the conflicting
+// import, or NULL if none found.
+
+NameSpace *check_for_imports(NameSpace *ns);
+
+#endif
--- /dev/null
+%{
+/* idlparse.y -- parser for the IDL compiler
+ *
+ * Written by Scott Wood <scott@buserror.net>
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <string>
+
+#include <idlc.h>
+#include <util.h>
+#define YYDEBUG 1
+
+#if 1
+#define do_yyerror() do { \
+ fprintf(stderr, "YYERROR at %d\n", __LINE__); \
+ throw UserError(); \
+} while (0)
+#else
+#define do_yyerror() YYERROR
+#endif
+
+static StrListRef nspace_name;
+static StrListRef cur_strlist;
+static ConListRef cur_conlist;
+static IDListRef cur_idlist;
+
+%}
+%union {
+ // The lifetime of any of these pointers is one instance
+ // of the one_input rule.
+
+ String *string;
+ StrList *strl;
+ IDList *idl;
+
+ struct Decl { // Used for namespace-qualified type declarations
+ NameSpace *ns; // Namespace portion -- all but last field
+ const String *ident; // New identifier portion -- last field
+ } decl;
+
+ SymList *syml;
+ Symbol *sym;
+ Enum *en;
+ Struct *str;
+ BitField *bf;
+ Interface *iface;
+ Method *meth;
+ Datum *datum;
+ bool boolean;
+ IFaceList *ifl;
+
+ Con con;
+ ConList *conl;
+ Array *array;
+
+ struct {
+ StrList *type;
+ Array *array;
+ } type;
+
+ int val;
+
+ struct {
+ Decl decl;
+ StrList *attr;
+ } struct_decl_attr;
+
+ struct {
+ StrList *ids;
+ StrList *attr;
+ } ids_attr;
+
+ uint64_t guid[2];
+}
+
+// The token list must be exactly the same as in idlparse.y, so that
+// the same lexer can be used.
+
+%token <string> TOK_IDENT
+%token TOK_IFACE
+%token TOK_STRUCT
+%token TOK_CHAR
+%token TOK_OCTET
+%token <con> TOK_ICON
+%token <con> TOK_FCON
+%token <con> TOK_UCON
+%token <con> TOK_INVALID
+%token TOK_BOOL
+%token TOK_SHORT
+%token TOK_INT
+%token TOK_LONG
+%token TOK_USHORT
+%token TOK_UINT
+%token TOK_ULONG
+%token TOK_FSHORT
+%token TOK_FLONG
+%token TOK_CONST
+%token TOK_BITFIELD
+%token TOK_ENUM
+%token TOK_NAMESPACE
+%token TOK_USING
+%token TOK_ASYNC
+%token TOK_INOUT
+%token TOK_OUT
+%token TOK_3DOT
+%token TOK_2DOT
+%token <string> TOK_STR
+%token TOK_SHARED
+%token TOK_PUSH
+%token TOK_TYPEDEF
+%token TOK_ALIAS
+%token TOK_VIRTUAL
+%token TOK_GUID
+%token TOK_INLINE
+%token TOK_STATIC
+%token TOK_IMMUTABLE
+%token TOK_TRUE
+%token TOK_FALSE
+
+// CDL tokens
+%token TOK_COPY
+%token TOK_METHOD
+%token TOK_CLASS
+%token TOK_NAME
+
+// These are not real tokens, but are used as special values in places that
+// normally accept tokens.
+%token TOK_NONE
+%token TOK_ANON
+%token TOK_DCON
+
+%type <strl> basictype
+%type <type> type
+%type <con> const
+%type <con> maybeconst
+%type <array> arraybounds
+%type <con> constnominus
+%type <datum> bfelem
+%type <syml> bfelems
+%type <syml> maybe_bfelems
+%type <strl> maybe_ids
+%type <strl> ids
+%type <conl> ideqs
+%type <string> ident
+%type <strl> strlist
+%type <strl> maybe_strlist
+%type <strl> end_strlist
+%type <con> size
+%type <datum> bftype
+%type <strl> qualified_ident
+%type <strl> qualified_ident_nodbldot
+%type <decl> qualified_decl
+%type <decl> maybe_qualified_decl
+%type <decl> anon_decl
+%type <idl> qualified_idlist
+%type <boolean> maybe_dot_star
+%type <strl> inherit_struct
+%type <str> struct
+%type <bf> bitfield
+%type <en> enum
+%type <iface> iface
+%type <idl> inherit_ifaces
+%type <boolean> typedef_or_alias_keyword
+%type <guid> guid
+%type <struct_decl_attr> struct_decl_and_attr
+%type <ids_attr> ids_attr
+
+%%
+
+input:
+ /* empty */
+| input one_input
+;
+
+one_input_real:
+ using ';'
+| namespace
+| enum ';' {}
+| bitfield ';' {}
+| struct ';' {}
+| iface ';' {}
+| typedef_or_alias ';' {}
+| const_datum ';' {}
+;
+
+one_input:
+ one_input_real
+;
+
+namespace_body:
+ ';' {
+ NameSpace *ret = add_nspace(nspace_name, false);
+ nspace_name = NULL;
+
+ if (!ret)
+ do_yyerror();
+ }
+| '{' {
+ NameSpace *ret = add_nspace(nspace_name, true);
+ nspace_name = NULL;
+
+ if (!ret)
+ do_yyerror();
+ } input '}' {
+ pop_nspace();
+ }
+| {
+ NameSpace *ret = add_nspace(nspace_name, true);
+ nspace_name = NULL;
+
+ if (!ret)
+ do_yyerror();
+ } one_input {
+ pop_nspace();
+ }
+;
+
+namespace:
+ TOK_NAMESPACE qualified_ident {
+ nspace_name = $2;
+ } namespace_body
+;
+
+const_datum:
+ TOK_CONST type ideqs {
+ ConList *cl = $3;
+
+ for (ConList::iterator i = cl->begin(); i != cl->end(); ++i)
+ Datum::declare((*i).str, cur_nspace, $2.type, $2.array, &(*i).con);
+ }
+;
+
+ids_attr:
+ /* empty */ {
+ $$.ids = NULL;
+ $$.attr = NULL;
+ }
+| ids maybe_strlist {
+ $$.ids = $1;
+ $$.attr = $2;
+ }
+;
+
+struct_elem:
+ const_datum ';'
+| type ids maybe_strlist ';' {
+ declare_data(cur_nspace, $2, $1.type, $1.array, $3);
+ }
+| enum maybe_ids ';' {
+ if (!$2 && $1->name->token == TOK_ANON)
+ yyerrorfl($1->name->file, $1->name->line,
+ "An anonymous type must declare a datum.");
+
+ if ($2) {
+ StrList *strl = $1->get_fq_name();
+ strl->push_front(new String(""));
+ declare_data(cur_nspace, $2, strl, NULL, NULL);
+ }
+ }
+| bitfield maybe_ids ';' {
+ if (!$2 && $1->name->token == TOK_ANON)
+ yyerrorfl($1->name->file, $1->name->line,
+ "An anonymous type must declare a datum.");
+
+ if ($2) {
+ StrList *strl = $1->get_fq_name();
+ strl->push_front(new String(""));
+ declare_data(cur_nspace, $2, strl, NULL, NULL);
+ }
+ }
+| struct ids_attr ';' {
+ if (!$2.ids && $1->name->token == TOK_ANON)
+ yyerrorfl($1->name->file, $1->name->line,
+ "An anonymous type must declare a datum.");
+
+ if ($2.ids) {
+ StrList *strl = $1->get_fq_name();
+ strl->push_front(new String(""));
+ declare_data(cur_nspace, $2.ids, strl, NULL, $2.attr);
+ }
+ }
+| typedef_or_alias ';'
+| using ';'
+| guid ';' {
+ Struct *str = dynamic_cast<Struct *>(*cur_nspace);
+ assert(str);
+
+ str->set_guid($1);
+ }
+;
+
+struct_body:
+ /* empty */
+| struct_elem struct_body
+;
+
+inherit_struct:
+ /* empty */ {
+ $$ = NULL;
+ }
+| ':' qualified_ident {
+ $$ = $2;
+ }
+;
+
+struct_decl_and_attr:
+ anon_decl {
+ // Anonymous structs have no attributes.
+ $$.decl = $1;
+ $$.attr = new StrList;
+ }
+| qualified_decl maybe_strlist {
+ $$.decl = $1;
+ $$.attr = $2;
+ }
+;
+
+struct:
+ TOK_STRUCT struct_decl_and_attr inherit_struct '{' {
+ $<str>$ = Struct::declare($2.decl.ident, $2.decl.ns, $3);
+
+ for (StrList::iterator i = $2.attr->begin(); i != $2.attr->end(); ++i) {
+ const String *attr = *i;
+
+ switch (attr->token) {
+ case TOK_VIRTUAL:
+ $<str>$->set_virtual();
+ break;
+
+ case TOK_INLINE:
+ $<str>$->set_inline();
+ break;
+
+ default:
+ yyerrorfl(attr->file, attr->line, "Invalid attribute \"%s\"",
+ (*i)->c_str());
+ }
+ }
+
+ nspace_stack.push_front(cur_nspace);
+ cur_nspace = $<str>$;
+ }
+
+ struct_body '}' {
+ // One for the struct's namespace, and one for the
+ // namespace it was declared in.
+ pop_nspace();
+ pop_nspace();
+ $$ = $<str>5;
+ }
+;
+
+param:
+ type strlist {
+ const String *name = $2->front();
+ $2->pop_front();
+
+ CompiledParam::Flags flags = {};
+ int dirs = 0;
+
+ flags.field.In = 1;
+
+ for (StrList::iterator i = $2->begin(); i != $2->end(); ++i) {
+ const String *attr = *i;
+
+ switch (attr->token) {
+ case TOK_OUT:
+ if (dirs++ > 0) {
+ yyerrorf("Only one direction attribute may be given.");
+ } else {
+ flags.field.In = 0;
+ flags.field.Out = 1;
+ }
+
+ break;
+
+ case TOK_INOUT:
+ if (dirs++ > 0) {
+ yyerrorf("Only one direction attribute may be given.");
+ } else {
+ flags.field.In = 1;
+ flags.field.Out = 1;
+ }
+
+ break;
+
+ case TOK_SHARED:
+ flags.field.Shared = 1;
+ break;
+
+ case TOK_PUSH:
+ flags.field.Push = 1;
+ break;
+
+ case TOK_INLINE:
+ flags.field.Inline = 1;
+ break;
+
+ case TOK_IMMUTABLE:
+ flags.field.Immutable = 1;
+ break;
+
+ default:
+ yyerrorfl(attr->file, attr->line,
+ "Invalid attribute \"%s\"", (*i)->c_str());
+ }
+ }
+
+ Param::declare(name, cur_nspace, $1.type, flags, $1.array);
+ }
+;
+
+more_params:
+ /* empty */
+| ',' param_list
+;
+
+param_list:
+ /* empty */
+| param more_params
+;
+
+method:
+ ident '(' {
+ $<meth>$ = Method::declare($1, cur_nspace);
+
+ nspace_stack.push_front(cur_nspace);
+ cur_nspace = $<meth>$;
+ } param_list ')' maybe_strlist {
+ for (StrList::iterator i = $6->begin(); i != $6->end(); ++i) {
+ const String *attr = *i;
+
+ switch (attr->token) {
+ case TOK_ASYNC:
+ $<meth>3->set_async();
+ break;
+
+ default:
+ yyerrorfl(attr->file, attr->line,
+ "Invalid attribute \"%s\".", (*i)->c_str());
+ }
+ }
+ pop_nspace();
+ }
+;
+
+iface_body:
+ /* empty */
+| method ';' iface_body
+| enum ';' iface_body {}
+| bitfield ';' iface_body {}
+| struct ';' iface_body {}
+| iface ';' iface_body {}
+| typedef_or_alias ';' iface_body
+| const_datum ';' iface_body
+| guid ';' iface_body {
+ Interface *iface = dynamic_cast<Interface *>(*cur_nspace);
+ assert(iface);
+
+ iface->set_guid($1);
+ }
+| using ';' iface_body
+;
+
+guid:
+ TOK_GUID ':' TOK_STR {
+ /* Parse the textual GUID into $$ (uint64_t[2], i.e. 128
+ * bits); parse_guid fills it as raw bytes, hence the cast. */
+ parse_guid($3->c_str(), (char *)$$);
+ }
+;
+
+
+inherit_ifaces:
+ /* empty */ {
+ $$ = new IDList;
+ }
+| ':' qualified_idlist {
+ $$ = $2;
+ }
+;
+
+iface:
+ TOK_IFACE qualified_decl inherit_ifaces {
+ $<iface>$ = Interface::declare($2.ident, $2.ns, $3);
+
+ nspace_stack.push_front(cur_nspace);
+ cur_nspace = $<iface>$;
+ } '{' iface_body '}' {
+ pop_nspace();
+ pop_nspace();
+ $$ = $<iface>3;
+ }
+;
+
+maybe_namespace:
+ /* empty */
+ | TOK_NAMESPACE
+;
+
+using_list_entry:
+ maybe_namespace qualified_ident {
+ const String *end = $2->back();
+ bool ns = false;
+
+ if (end->token == '*') {
+ ns = true;
+ $2->pop_back();
+ }
+
+ if (ns) {
+ cur_nspace->add_search($2);
+ } else {
+ Alias::declare(end, cur_nspace, $2, true);
+ }
+ }
+;
+
+using_list:
+ using_list_entry
+ | using_list_entry ',' using_list
+;
+
+using: TOK_USING using_list
+;
+
+ids_body:
+ ident {
+ cur_strlist->push_back($1);
+ }
+| ids_body ',' ident {
+ cur_strlist->push_back($3);
+ }
+;
+
+ids:
+ ids_body {
+ $$ = cur_strlist;
+ cur_strlist = new StrList;
+ }
+;
+
+maybe_ids:
+ /* empty */ {
+ $$ = NULL;
+ }
+| ids {
+ $$ = $1;
+ }
+;
+
+coninit:
+ ident '=' const {
+ cur_conlist->push_back(ConInit($1, $3));
+ }
+;
+
+/* ideqs is ids with a constant initializer for each element. */
+
+ideqs_body:
+ coninit
+/* Left recursion on ideqs_body itself, so that EVERY element of the
+ * list carries an initializer (as the comment above requires).  The
+ * previous "ids_body ',' coninit" form routed all but the last
+ * element into cur_strlist as bare identifiers, leaving them without
+ * initializers and polluting the shared strlist accumulator. */
+| ideqs_body ',' coninit
+;
+
+ideqs:
+ ideqs_body {
+ $$ = cur_conlist;
+ cur_conlist = new ConList;
+ }
+;
+
+ident:
+ TOK_IDENT
+| TOK_ASYNC {
+ $$ = new String("async", cur_input_file, curline, TOK_ASYNC);
+ }
+| TOK_INOUT {
+ $$ = new String("inout", cur_input_file, curline, TOK_INOUT);
+ }
+| TOK_OUT {
+ $$ = new String("out", cur_input_file, curline, TOK_OUT);
+ }
+| TOK_SHARED {
+ $$ = new String("shared", cur_input_file, curline, TOK_SHARED);
+ }
+| TOK_PUSH {
+ $$ = new String("push", cur_input_file, curline, TOK_PUSH);
+ }
+| TOK_SHORT {
+ $$ = new String("short", cur_input_file, curline, TOK_SHORT);
+ }
+| TOK_INT {
+ $$ = new String("int", cur_input_file, curline, TOK_INT);
+ }
+| TOK_LONG {
+ $$ = new String("long", cur_input_file, curline, TOK_LONG);
+ }
+| TOK_USHORT {
+ $$ = new String("ushort", cur_input_file, curline, TOK_USHORT);
+ }
+| TOK_UINT {
+ $$ = new String("uint", cur_input_file, curline, TOK_UINT);
+ }
+| TOK_ULONG {
+ $$ = new String("ulong", cur_input_file, curline, TOK_ULONG);
+ }
+| TOK_CHAR {
+ $$ = new String("char", cur_input_file, curline, TOK_CHAR);
+ }
+| TOK_OCTET {
+ $$ = new String("octet", cur_input_file, curline, TOK_OCTET);
+ }
+| TOK_FSHORT {
+ $$ = new String("fshort", cur_input_file, curline, TOK_FSHORT);
+ }
+| TOK_FLONG {
+ $$ = new String("flong", cur_input_file, curline, TOK_FLONG);
+ }
+| TOK_BOOL {
+ $$ = new String("bool", cur_input_file, curline, TOK_BOOL);
+ }
+| TOK_METHOD {
+ $$ = new String("method", cur_input_file, curline, TOK_METHOD);
+ }
+| TOK_NAME {
+ $$ = new String("name", cur_input_file, curline, TOK_NAME);
+ }
+| TOK_COPY {
+ $$ = new String("copy", cur_input_file, curline, TOK_COPY);
+ }
+| TOK_CLASS {
+ $$ = new String("class", cur_input_file, curline, TOK_CLASS);
+ }
+| TOK_GUID {
+ $$ = new String("guid", cur_input_file, curline, TOK_GUID);
+ }
+| TOK_STATIC {
+ $$ = new String("static", cur_input_file, curline, TOK_STATIC);
+ }
+| TOK_VIRTUAL {
+ $$ = new String("virtual", cur_input_file, curline, TOK_VIRTUAL);
+ }
+| TOK_INLINE {
+ $$ = new String("inline", cur_input_file, curline, TOK_INLINE);
+ }
+| TOK_IMMUTABLE {
+ $$ = new String("immutable", cur_input_file, curline, TOK_IMMUTABLE);
+ }
+;
+
+strlist_body:
+ ident {
+ cur_strlist->push_back($1);
+ }
+| strlist_body ident {
+ cur_strlist->push_back($2);
+ }
+;
+
+end_strlist: {
+ $$ = cur_strlist;
+ cur_strlist = new StrList;
+ }
+;
+
+strlist:
+ strlist_body end_strlist {
+ $$ = $2;
+ }
+;
+
+maybe_strlist:
+ end_strlist
+| strlist
+;
+
+qualified_ident_raw:
+ ident {
+ cur_strlist->push_back($1);
+ }
+| qualified_ident_raw '.' ident {
+ cur_strlist->push_back($3);
+ }
+;
+
+maybe_dot_star:
+ /* empty */ {
+ $$ = false;
+ }
+| '.' '*' {
+ $$ = true;
+ }
+;
+
+qualified_ident_nodbldot:
+ qualified_ident_raw maybe_dot_star {
+ if ($2)
+ cur_strlist->push_back(new String("*", cur_input_file,
+ curline, '*'));
+
+ $$ = cur_strlist;
+ cur_strlist = new StrList;
+ }
+;
+
+qualified_ident:
+ TOK_2DOT {
+ cur_strlist->push_front(new String("", cur_input_file,
+ curline, TOK_IDENT));
+ } qualified_ident_nodbldot {
+ $$ = $3;
+ }
+| qualified_ident_nodbldot
+;
+
+qualified_decl:
+ qualified_ident_raw {
+ $$.ident = cur_strlist->back();
+ $$.ident->retain();
+
+ cur_strlist->pop_back();
+
+ if (!cur_strlist->empty())
+ $$.ns = add_nspace(cur_strlist, true);
+ else {
+ $$.ns = cur_nspace;
+ nspace_stack.push_front(cur_nspace);
+ }
+
+ cur_strlist = new StrList;
+
+ if (!$$.ns)
+ do_yyerror();
+ }
+;
+
+anon_decl: {
+ char buf[32];
+ snprintf(buf, 32, "_anon_%u", cur_nspace->anon++);
+
+ $$.ns = cur_nspace;
+ $$.ident = new String(buf, cur_input_file, curline, TOK_ANON);
+ nspace_stack.push_front(cur_nspace);
+ }
+;
+
+maybe_qualified_decl:
+ anon_decl
+| qualified_decl
+;
+
+qualified_ids:
+ qualified_ident {
+ cur_idlist->push_back($1);
+ }
+| qualified_ids ',' qualified_ident {
+ cur_idlist->push_back($3);
+ }
+;
+
+qualified_idlist:
+ qualified_ids {
+ $$ = cur_idlist;
+ cur_idlist = new IDList;
+ }
+;
+
+enum:
+ TOK_ENUM maybe_qualified_decl size '{' {
+ int bits = 32;
+
+ if ($3.type == TOK_UCON)
+ bits = $3.con.ucon;
+
+ $<en>$ = Enum::declare($2.ident, $2.ns, bits);
+
+ nspace_stack.push_front(cur_nspace);
+ cur_nspace = $<en>$;
+ } enuments '}' {
+ pop_nspace();
+ pop_nspace();
+ $$ = $<en>5;
+ }
+;
+
+more_enuments:
+ /* empty */
+| ',' enuments
+;
+
+enuments:
+ /* empty */
+| ident {
+ Enum *en = dynamic_cast<Enum *>(*cur_nspace);
+ assert(en);
+
+ CompiledBasicType bt = { bits: en->def.bits };
+
+ // GCC can't handle complex labelled initializers as of 3.3;
+ // plus, the syntax gets kind of ugly.
+
+ bt.flags.field.Unsigned = 1;
+
+ Con con;
+ con.type = TOK_UCON;
+ con.con.ucon = en->next_val++;
+
+ Datum::declare($1, en, bt, NULL, &con);
+ } more_enuments
+;
+
+bitfield:
+ TOK_BITFIELD maybe_qualified_decl size '{' {
+ int bits = 32;
+
+ if ($3.type == TOK_UCON)
+ bits = $3.con.ucon;
+
+ $<bf>$ = BitField::declare($2.ident, $2.ns, bits);
+
+ nspace_stack.push_front(cur_nspace);
+ cur_nspace = $<bf>$;
+ } maybe_bfelems '}' {
+ pop_nspace();
+ pop_nspace();
+ $$ = $<bf>5;
+ }
+;
+
+size:
+ /* empty */ {
+ $$.type = TOK_NONE;
+ }
+| ':' const {
+ $$ = $2;
+
+ if ($$.type == TOK_DCON) {
+ // FIXME: support symbolic sizes
+ yyerrorf("Symbolic sizes are currently unsupported.\n");
+ $$.type = TOK_INVALID;
+ } else if (!(($$.type == TOK_UCON && $$.con.ucon > 0) ||
+ ($$.type == TOK_ICON && $$.con.icon > 0)))
+ {
+ yyerrorf("Sizes must be positive integers.");
+
+ $$.type = TOK_INVALID;
+ $$.con.ucon = 0;
+ } else {
+ // Promote signed to unsigned.
+ $$.type = TOK_UCON;
+ }
+ }
+;
+
+bfelems:
+ bfelem {
+ $$ = new SymList;
+ $$->push_back($1);
+ }
+| bfelems ',' bfelem {
+ $1->push_back($3);
+ }
+;
+
+maybe_comma:
+ /* empty */
+ | ','
+;
+
+maybe_bfelems:
+ /* empty */ {
+ $$ = new SymList;
+ }
+ | bfelems maybe_comma
+;
+
+bftype:
+ ident {
+ CompiledBasicType bt = {};
+ bt.flags.field.Unsigned = 1;
+ bt.bits = dynamic_cast<BitField *>(*cur_nspace)->def.bits;
+
+ $$ = Datum::declare($1, cur_nspace, bt, NULL);
+ }
+| basictype ident {
+ $$ = Datum::declare($2, cur_nspace, $1, NULL);
+ }
+| enum ident {
+ StrList *strl = $1->get_fq_name();
+ strl->push_front(new String(""));
+ $$ = Datum::declare($2, cur_nspace, strl, NULL);
+ }
+| bitfield ident {
+ StrList *strl = $1->get_fq_name();
+ strl->push_front(new String(""));
+ $$ = Datum::declare($2, cur_nspace, strl, NULL);
+ }
+;
+
+bfelem: bftype size {
+ $$ = $1;
+
+ if ($2.type == TOK_UCON)
+ $$->def.ucon = $2.con.ucon;
+ else
+ $$->def.icon = -1;
+ }
+;
+
+basictype:
+ qualified_ident {
+ $$ = $1;
+ }
+;
+
+type:
+ basictype {
+ $$.type = $1;
+ $$.array = NULL;
+ }
+| basictype '[' arraybounds ']' {
+ $$.type = $1;
+ $$.array = $3;
+ }
+;
+
+maybeconst:
+ /* empty */ {
+ $$.type = TOK_NONE;
+ }
+| const {
+ $$ = $1;
+ }
+;
+
+const:
+ constnominus { $$ = $1; }
+| '-' constnominus {
+
+ switch($2.type) {
+ case TOK_UCON:
+ yyerrorf("The constant %" PRIu64 " is too large to be negated.",
+ $2.con.ucon);
+ do_yyerror();
+ break;
+
+ case TOK_ICON:
+ $$.con.icon = -$2.con.icon;
+ break;
+
+ case TOK_FCON:
+ $$.con.fcon = -$2.con.fcon;
+ }
+
+ $$.type = $2.type;
+ }
+| TOK_FALSE {
+ $$.type = TOK_BOOL;
+ $$.con.ucon = 0;
+ }
+| TOK_TRUE {
+ $$.type = TOK_BOOL;
+ $$.con.ucon = 1;
+ }
+;
+
+constnominus:
+ TOK_ICON { $$ = $1; }
+| TOK_UCON { $$ = $1; }
+| TOK_FCON { $$ = $1; }
+| TOK_INVALID { $$ = $1; }
+| qualified_ident {
+ assert($1);
+ $$.type = TOK_DCON;
+ $$.con.dcon = $1;
+ }
+;
+
+arraybounds:
+ /* empty */ {
+ $$ = new Array(cur_nspace);
+ $$->set_unbounded();
+ }
+| const {
+ $$ = new Array(cur_nspace);
+ $$->set_bound($1, 0);
+ $$->set_bound($1, 1);
+ }
+| maybeconst TOK_3DOT maybeconst {
+ $$ = new Array(cur_nspace);
+ $$->set_bound($1, 0);
+ $$->set_bound($3, 1);
+ }
+;
+
+typedef_or_alias_keyword:
+ TOK_TYPEDEF {
+ $$ = true;
+ }
+| TOK_ALIAS {
+ $$ = false;
+ }
+;
+
+typedef_or_alias:
+ typedef_or_alias_keyword basictype ids {
+ Type *t = lookup_type($2, NULL, true);
+ BasicType *bt = dynamic_cast<BasicType *>(t);
+
+ if (bt) {
+ // It's a basic type, so a symbolic typedef won't work.
+ declare_basictypes(cur_nspace, $3, bt, $1);
+ } else {
+ declare_aliases(cur_nspace, $3, $2, $1);
+ }
+ }
+;
+
+%%
+
+// One-time parser initialization: wire the lexer's semantic-value
+// pointers to this parser's yylval and pre-create the accumulator
+// lists used by list-building rules.
+void setup_idlparse()
+{
+ yylval_con = &idl_lval.con;
+ yylval_string = &idl_lval.string;
+
+ // These are kept in an initialized state so as to avoid the need
+ // for some early actions, thus eliminating some conflicts that
+ // would otherwise have caused things like "guid" to need to be
+ // reserved words.
+
+ cur_strlist = new StrList;
+ cur_conlist = new ConList;
+ cur_idlist = new IDList;
+}
--- /dev/null
+/* input.cc -- Code to load symbols from a legacy filesystem
+ *
+ * Written by Scott Wood <scott@buserror.net>
+ */
+
+#include <cstdio>
+#include <vector>
+#include <memory>
+#include <cstring>
+
+#include <errno.h>
+#include <dirent.h>
+
+#include <idlc.h>
+#include <parser.h>
+
+// To deal with namespaces on a case-insensitive filesystem, names
+// are annotated with a hexadecimal prefix indicating the case of each
+// letter. Each bit in the number represents a letter, with 0 being
+// lowercase (or a non-letter) and 1 being uppercase. The
+// least-significant bit is the first letter. This is done on both
+// case sensitive and case insensitive filesystems so that the output
+// can be freely copied between them (it also simplifies the code).
+// As such, hex digits should be in upper case so that case-sensitive
+// lookups can be used. This prefix is separated from the actual name
+// by an underscore.
+//
+// When the time comes along that UTF-8 is supported, this applies to
+// UTF-8 characters, not ASCII characters.
+//
+// This mangling can also be used (with a leading underscore or
+// whatever) in case-insensitive languages, though ideally there
+// should be aliases with the unannotated name if there is no
+// conflict.
+//
+// I don't want to go the CORBA route of disallowing case conflicts,
+// as I find using capitals for types and lowercase for instances to
+// be a useful convention (and less ugly than appending _t, except
+// when case sensitivity is missing), and would rather not be limited
+// at such a fundamental level by broken languages.
+
+// Convert a value in [0, 15] to its hex digit.  Upper case is
+// required so mangled names can be compared with case-sensitive
+// lookups (see the mangling comment above).
+static char tohex(int digit)
+{
+ if (digit < 10)
+ return digit + '0';
+
+ return digit + 'A' - 10;
+}
+
+// Prefix "name" with an uppercase-hex bitmap recording which of its
+// characters are uppercase (LSB = first character), separated from
+// the name by an underscore -- see the case-mangling comment at the
+// top of this file.
+const String *NameSpace::mangle(const String *name)
+{
+ String *mangled_string = new String;
+
+ // The conversion is done manually so that I don't have to deal
+ // with bignum arithmetic.
+
+ std::vector<bool> hex;
+ std::vector<bool>::size_type name_len = name->length();
+
+ // Pad out the first digit with leading zeroes, so that
+ // we don't have to deal with that later.
+
+ if (name_len & 3)
+ for (int i = name_len & 3; i < 4; ++i)
+ hex.push_back(0);
+
+ // The cast to unsigned char matters: passing a negative char
+ // (8-bit characters on a signed-char platform) to isupper() is
+ // undefined behavior.
+ for (string::const_iterator i = name->begin(); i != name->end(); ++i)
+ hex.push_back(isupper((unsigned char)*i));
+
+ assert((hex.size() & 3) == 0);
+
+ // Fold each group of four case bits into one hex digit, MSB first.
+ for (std::vector<bool>::size_type i = 0; i < hex.size(); i += 4) {
+ int digit = hex[i] * 8 +
+ hex[i + 1] * 4 +
+ hex[i + 2] * 2 +
+ hex[i + 3];
+
+ *mangled_string += tohex(digit);
+ }
+
+ *mangled_string += '_';
+ *mangled_string += *name;
+ return mangled_string;
+}
+
+// Thrown on fread/file failures; captures a human-readable reason at
+// the point of failure.
+struct IOError
+{
+ const char *msg;
+
+ IOError(FILE *f)
+ {
+ // ferror() only reports THAT an error occurred (a nonzero
+ // flag, not an error code), so the message must come from
+ // errno.  The old code passed ferror()'s return value to
+ // strerror(), yielding an unrelated message.  A short read
+ // at EOF sets neither indicator's errno meaningfully, hence
+ // the fixed text.
+ if (ferror(f))
+ msg = strerror(errno);
+ else
+ msg = "Short File";
+ }
+};
+
+// State threaded through the import of one on-disk object: its name,
+// where it came from, and how to interpret the bytes.
+struct ImportContext {
+ const String *name; // symbol name being imported
+ const char *filename; // path of the object, for diagnostics
+ File f; // open handle (closed automatically)
+ NameSpace *parent; // namespace receiving the import
+ bool swap; // file is other-endian; swap all fields
+ bool is_dir; // object is a directory, not a file
+ bool is_typedef;
+ CompiledDefHeader hdr;
+};
+
+// Byte-swap (if needed) and range-check an imported array-bounds
+// pair.  bounds[0] is the lower bound (must be >= 0); bounds[1] is
+// the upper bound, where -1 means unbounded.
+static void import_and_validate_array(ImportContext &ctx,
+ CompiledArray &out,
+ CompiledArray &in)
+{
+ out.bounds[0] = swap64(in.bounds[0], ctx.swap);
+ out.bounds[1] = swap64(in.bounds[1], ctx.swap);
+
+ if (out.bounds[0] < 0) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(bad array lower bound %" PRId64 ").\n",
+ ctx.filename, out.bounds[0]);
+ throw UserError();
+ }
+
+ if (out.bounds[1] < -1) {
+ // Print the UPPER bound here; the original printed
+ // out.bounds[0] (copy/paste slip), showing the wrong value
+ // in the diagnostic.
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(bad array upper bound %" PRId64 ").\n",
+ ctx.filename, out.bounds[1]);
+ throw UserError();
+ }
+
+ if (out.bounds[1] > 0 &&
+ out.bounds[1] < out.bounds[0]) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(array upper bound %" PRId64 " less than lower bound %" PRId64 ").\n",
+ ctx.filename, out.bounds[1], out.bounds[0]);
+ throw UserError();
+ }
+}
+
+// Byte-swap and sanity-check an imported CompiledBasicType: at most
+// one of Unsigned/Float/Bool may be set, and the bit width must be
+// valid for that kind.
+static void import_and_validate_basictype(ImportContext &ctx,
+ CompiledBasicType &out,
+ CompiledBasicType &in)
+{
+ out.bits = swap32(in.bits, ctx.swap);
+ out.flags.raw = swap32(in.flags.raw, ctx.swap);
+
+ // Unsigned, Float, and Bool are mutually exclusive.
+ int num = 0;
+ if (out.flags.field.Unsigned)
+ num++;
+ if (out.flags.field.Float)
+ num++;
+ if (out.flags.field.Bool)
+ num++;
+ if (num > 1) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(bad flags 0x%08x).\n",
+ ctx.filename, out.flags.raw);
+ throw UserError();
+ }
+
+ if (out.flags.field.Bool) {
+ // Bool encodes its width as 0 in the object file.
+ if (out.bits != 0) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(bad bits %d for bool type).\n",
+ ctx.filename, out.bits);
+ throw UserError();
+ }
+ } else if (out.flags.field.Float) {
+ if (out.bits != 32 && out.bits != 64) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(bad bits %d for floating point type).\n",
+ ctx.filename, out.bits);
+ throw UserError();
+ }
+ } else if (!dynamic_cast<BitField *>(ctx.parent) &&
+ !dynamic_cast<Enum *>(ctx.parent)) {
+ // Free-standing integers must be a standard machine width;
+ // bitfield/enum members are range-checked by their parent
+ // instead (see comment below).
+ if (out.bits != 8 && out.bits != 16 &&
+ out.bits != 32 && out.bits != 64) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(bad bits %d for integer type).\n",
+ ctx.filename, out.bits);
+ throw UserError();
+ }
+
+ if (out.bits == 8 && !out.flags.field.Unsigned) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(octet type must be unsigned).\n",
+ ctx.filename);
+ throw UserError();
+ }
+ }
+
+ // Bitfield entries can take anywhere from 1 to 64 bits, but
+ // will be verified by the parent after all entries have loaded,
+ // so it can verify that the sum does not exceed the size of
+ // the bitfield.
+
+ import_and_validate_array(ctx, out.array, in.array);
+}
+
+BasicType *BasicType::import(ImportContext &ctx)
+{
+ if (ctx.is_dir) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(a BasicType must be a file).\n",
+ ctx.filename);
+ throw UserError();
+ }
+
+ CompiledBasicType def;
+ if (fread(&def, sizeof(def), 1, ctx.f) != 1)
+ throw IOError(ctx.f);
+
+ BasicType *bt = new BasicType(ctx.name);
+ ctx.parent->add_import(bt, ctx.filename);
+
+ import_and_validate_basictype(ctx, bt->def, def);
+
+ bt->complete = true;
+ return bt;
+}
+
+static String *read_string_raw(ImportContext &ctx, int32_t length)
+{
+ if (length < 0 || length > 4096) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(unreasonable string length %d).\n",
+ ctx.filename, length);
+ throw UserError();
+ }
+
+ // Is there a way to reserve space in a C++ string, and pass
+ // the buffer into fread?
+
+ char *buf = new char[length + 1];
+
+ if (fread(buf, length + 1, 1, ctx.f) != 1)
+ throw IOError(ctx.f);
+
+ String *s = new String(buf);
+ delete[] buf;
+ return s;
+}
+
+static const String *read_string(ImportContext &ctx)
+{
+ off_t pos = ftello(ctx.f);
+ int pad = ((pos + 3) & ~3) - pos;
+
+ if (pad != 0 && fseeko(ctx.f, pad, SEEK_CUR) != 0)
+ throw IOError(ctx.f);
+
+ int32_t length;
+ if (fread(&length, sizeof(length), 1, ctx.f) != 1)
+ throw IOError(ctx.f);
+
+ return read_string_raw(ctx, swap32(length, ctx.swap));
+}
+
+StrList::StrList(const String *input, char delimeter)
+{
+ const char *cur = input->c_str();
+
+ do {
+ char *next = strchr(cur, delimeter);
+ String *s = new String();
+ s->token = TOK_IDENT;
+
+ if (next == cur || *cur == 0)
+ throw InvalidArgument();
+
+ if (next) {
+ s->append(cur, next - cur);
+ cur = next + 1;
+ } else {
+ s->append(cur);
+ cur = NULL;
+ }
+
+ push_back(s);
+ } while (cur);
+}
+
+// FIXME: Handle illegal recursion.
+static Symbol *lookup_sym_noyyerror(ImportContext &ctx, const String *name)
+{
+ StrList *sl;
+
+ try {
+ sl = new StrList(name);
+ }
+
+ catch (InvalidArgument) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(\"%s\" is not a valid identifier).\n",
+ ctx.filename, name->c_str());
+ throw UserError();
+ }
+
+ try {
+ return lookup_sym(toplevel, sl, toplevel);
+ }
+
+ catch (UserError) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(\"%s\" could not be loaded).\n",
+ ctx.filename, name->c_str());
+
+ throw UserError();
+ }
+}
+
+// FIXME: Handle illegal recursion.
+static Symbol *lookup_sym_in_ns_noyyerror(ImportContext &ctx,
+ NameSpace *ns,
+ const String *name)
+{
+ Symbol *sym = ns->lookup_noex(name);
+ if (!sym) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(\"%s\" is not found).\n",
+ ctx.filename, name->c_str());
+
+ throw UserError();
+ }
+
+ return sym;
+}
+
// Load a Datum from an open compiled-object file.  The header has
// already been consumed; a CompiledDatum record follows, optionally
// trailed by the fully-qualified name of the datum's type.  Throws
// IOError on a short read and UserError on any validation failure.
Datum *Datum::import(ImportContext &ctx)
{
	if (ctx.is_dir) {
		fprintf(stderr, "\"%s\" is not a valid object "
		        "(a Datum must be a file).\n",
		        ctx.filename);
		throw UserError();
	}

	CompiledDatum def;
	if (fread(&def, sizeof(def), 1, ctx.f) != 1)
		throw IOError(ctx.f);

	Datum *d = new Datum(ctx.name);
	d->def.flags.raw = swap32(def.flags.raw, ctx.swap);

	if (d->def.flags.field.Invalid) {
		fprintf(stderr, "\"%s\" is not a valid object "
		        "(bad flags 0x%08x).\n",
		        ctx.filename, d->def.flags.raw);
		throw UserError();
	}

	ctx.parent->add_import(d, ctx.filename);

	// A non-zero type-name length means the datum has a named type;
	// zero means the type is an inline basic type.
	d->def.type.length = swap32(def.type.length, ctx.swap);

	if (d->def.type.length != 0) {
		// Resolve the fully-qualified type name to a Type symbol.
		d->type_fq_name = read_string_raw(ctx, d->def.type.length);
		Symbol *sym = lookup_sym_noyyerror(ctx, d->type_fq_name);
		d->type = dynamic_cast<Type *>(sym);

		if (!d->type) {
			fprintf(stderr, "\"%s\" is not a valid object "
			        "(\"%s\" is not a type).\n",
			        ctx.filename, d->type_fq_name->c_str());
			throw UserError();
		}

		import_and_validate_array(ctx, d->def.basictype.array,
		                          def.basictype.array);

		// If the named type is itself basic, cache a shortcut to its
		// CompiledBasicType for the constant checks below.
		BasicType *bt = dynamic_cast<BasicType *>(*d->type);
		if (bt)
			d->cbt = &bt->def;
		else
			d->cbt = NULL;
	} else {
		d->basic = true;
		import_and_validate_basictype(ctx, d->def.basictype,
		                              def.basictype);
		d->cbt = &d->def.basictype;
	}

	d->def.ucon = swap64(def.ucon, ctx.swap);

	if (d->def.flags.field.Const) {
		// Constants require a basic type (possibly via a named type).
		if (!d->cbt) {
			fprintf(stderr, "\"%s\" is not a valid object "
			        "(constant, but non-basic type).\n",
			        ctx.filename);
			throw UserError();
		}

		// Pick the constant token kind from the type's flags.
		if (d->cbt->flags.field.Float)
			d->con_type = TOK_FCON;
		else if (!d->cbt->flags.field.Unsigned)
			d->con_type = TOK_ICON;
		else
			d->con_type = TOK_UCON;

		if (!d->verify_const()) {
			fprintf(stderr, "\"%s\" is not a valid object "
			        "(bad constant).\n",
			        ctx.filename);
			throw UserError();
		}
	} else {
		d->con_type = TOK_NONE;
	}

	return d;
}
+
+// OPT: Importing methods and supers isn't necessary for idlc to
+// work; it could be made optional to speed up large compilations.
+
// Load an Interface from its directory.  The CompiledInterface record
// in the ".self" file carries the GUID and the counts of methods and
// superinterfaces; their names follow as strings and must resolve to
// previously-loadable symbols.  Throws IOError on a short read and
// UserError on any validation failure.
Interface *Interface::import(ImportContext &ctx)
{
	if (!ctx.is_dir) {
		fprintf(stderr, "\"%s\" is not a valid object "
		        "(an Interface must be a directory).\n",
		        ctx.filename);
		throw UserError();
	}

	CompiledInterface def;
	if (fread(&def, sizeof(def), 1, ctx.f) != 1)
		throw IOError(ctx.f);

	Interface *iface = new Interface(ctx.name);
	iface->path = new String(ctx.filename);
	ctx.parent->add_import(iface, ctx.filename);

	// GUID words are copied without byte-swapping — presumably treated
	// as opaque bytes rather than integers; TODO confirm.
	iface->def.guid[0] = def.guid[0];
	iface->def.guid[1] = def.guid[1];

	int32_t num_methods = swap32(def.num_methods, ctx.swap);
	int32_t num_supers = swap32(def.num_supers, ctx.swap);

	if (num_methods < 0 || num_methods > 4096) {
		fprintf(stderr, "\"%s\" is not a valid object "
		        "(unreasonable num_methods %d).\n",
		        ctx.filename, num_methods);
		throw UserError();
	}

	if (num_supers < 0 || num_supers > 4096) {
		fprintf(stderr, "\"%s\" is not a valid object "
		        "(unreasonable num_supers %d).\n",
		        ctx.filename, num_supers);
		throw UserError();
	}

	// Each method name must resolve to a Method in this interface's
	// own namespace.
	for (int32_t i = 0; i < num_methods; i++) {
		const String *str = read_string(ctx);
		Symbol *sym = lookup_sym_in_ns_noyyerror(ctx, iface, str);
		Method *m = dynamic_cast<Method *>(sym);

		if (!m) {
			fprintf(stderr, "\"%s\" is not a valid object "
			        "(\"%s\" is not a method).\n",
			        ctx.filename, str->c_str());
			throw UserError();
		}

		iface->add_elem(m);
	}

	// FIXME: Check for bad recursion again

	// Superinterface names are fully qualified and resolved globally.
	for (int32_t i = 0; i < num_supers; i++) {
		const String *str = read_string(ctx);
		Symbol *sym = lookup_sym_noyyerror(ctx, str);
		Interface *super = dynamic_cast<Interface *>(sym);

		if (!super) {
			fprintf(stderr, "\"%s\" is not a valid object "
			        "(\"%s\" is not an interface).\n",
			        ctx.filename, str->c_str());
			throw UserError();
		}

		iface->add_super(super);
	}

	// add_elem()/add_super() must have accounted for every entry read.
	assert(num_methods == iface->def.num_methods);
	assert(num_supers == iface->def.num_supers);

	iface->sort_chains();
	return iface;
}
+
// Load a Method from its directory.  The CompiledMethod record in the
// ".self" file carries the flags and parameter count; each parameter
// name follows as a string and must resolve to a Param in the method's
// own namespace.  Throws IOError on a short read and UserError on any
// validation failure.
Method *Method::import(ImportContext &ctx)
{
	if (!ctx.is_dir) {
		fprintf(stderr, "\"%s\" is not a valid object "
		        "(a Method must be a directory).\n",
		        ctx.filename);
		throw UserError();
	}

	CompiledMethod def;
	if (fread(&def, sizeof(def), 1, ctx.f) != 1)
		throw IOError(ctx.f);

	Method *m = new Method(ctx.name);
	m->path = new String(ctx.filename);
	ctx.parent->add_import(m, ctx.filename);

	int32_t num_entries = swap32(def.num_entries, ctx.swap);

	m->def.flags.raw = swap32(def.flags.raw, ctx.swap);

	if (num_entries < 0 || num_entries > 4096) {
		fprintf(stderr, "\"%s\" is not a valid object "
		        "(unreasonable num_entries %d).\n",
		        ctx.filename, num_entries);
		throw UserError();
	}

	for (int32_t i = 0; i < num_entries; i++) {
		const String *str = read_string(ctx);
		Symbol *sym = lookup_sym_in_ns_noyyerror(ctx, m, str);
		Param *p = dynamic_cast<Param *>(sym);

		if (!p) {
			fprintf(stderr, "\"%s\" is not a valid object "
			        "(\"%s\" is not a parameter).\n",
			        ctx.filename, str->c_str());
			throw UserError();
		}

		m->add_elem(p);
	}

	// add_elem() must have accounted for every entry we read.
	assert(num_entries == m->def.num_entries);
	return m;
}
+
// Load a Param from an open compiled-object file.  Like Datum, a param
// either references a named type (non-zero type.length, name follows)
// or embeds an inline basic type.  Throws IOError on a short read and
// UserError on any validation failure.
Param *Param::import(ImportContext &ctx)
{
	if (ctx.is_dir) {
		fprintf(stderr, "\"%s\" is not a valid object "
		        "(a Parameter must be a file).\n",
		        ctx.filename);
		throw UserError();
	}

	CompiledParam def;
	if (fread(&def, sizeof(def), 1, ctx.f) != 1)
		throw IOError(ctx.f);

	Param *p = new Param(ctx.name);
	ctx.parent->add_import(p, ctx.filename);

	p->def.type.length = swap32(def.type.length, ctx.swap);

	if (p->def.type.length != 0) {
		// Named type: resolve the fully-qualified name that follows.
		p->type_fq_name = read_string_raw(ctx, p->def.type.length);
		Symbol *sym = lookup_sym_noyyerror(ctx, p->type_fq_name);
		p->type = dynamic_cast<Type *>(sym);

		if (!p->type) {
			fprintf(stderr, "\"%s\" is not a valid object "
			        "(\"%s\" is not a type).\n",
			        ctx.filename, p->type_fq_name->c_str());
			throw UserError();
		}

		import_and_validate_array(ctx, p->def.basictype.array,
		                          def.basictype.array);
	} else {
		// Inline basic type.
		import_and_validate_basictype(ctx, p->def.basictype,
		                              def.basictype);
	}

	p->def.flags.raw = swap32(def.flags.raw, ctx.swap);
	return p;
}
+
+// OPT: Importing data and super isn't necessary for idlc to
+// work; it could be made optional to speed up large compilations.
+
// Load a Struct from its directory.  The CompiledStruct record in the
// ".self" file carries the GUID, flags, and member count; member names
// follow as strings and must resolve to Datum symbols in the struct's
// own namespace.  If the Super flag is set, the superstruct's
// fully-qualified name follows as well.  Throws IOError on a short
// read and UserError on any validation failure.
Struct *Struct::import(ImportContext &ctx)
{
	if (!ctx.is_dir) {
		fprintf(stderr, "\"%s\" is not a valid object "
		        "(a Struct must be a directory).\n",
		        ctx.filename);
		throw UserError();
	}

	CompiledStruct def;
	if (fread(&def, sizeof(def), 1, ctx.f) != 1)
		throw IOError(ctx.f);

	Struct *s = new Struct(ctx.name);
	s->path = new String(ctx.filename);
	ctx.parent->add_import(s, ctx.filename);

	// GUID words are copied without byte-swapping — presumably treated
	// as opaque bytes rather than integers; TODO confirm.
	s->def.guid[0] = def.guid[0];
	s->def.guid[1] = def.guid[1];

	int32_t num_entries = swap32(def.num_entries, ctx.swap);

	s->def.flags.raw = swap32(def.flags.raw, ctx.swap);

	if (num_entries < 0 || num_entries > 4096) {
		fprintf(stderr, "\"%s\" is not a valid object "
		        "(unreasonable num_entries %d).\n",
		        ctx.filename, num_entries);
		throw UserError();
	}

	for (int32_t i = 0; i < num_entries; i++) {
		const String *str = read_string(ctx);

		Symbol *sym = lookup_sym_in_ns_noyyerror(ctx, s, str);
		Datum *d = dynamic_cast<Datum *>(sym);

		if (!d) {
			fprintf(stderr, "\"%s\" is not a valid object "
			        "(\"%s\" is not a datum).\n",
			        ctx.filename, str->c_str());
			throw UserError();
		}

		s->add_elem(d);
	}

	if (s->def.flags.field.Super) {
		// Superstruct names are fully qualified and resolved globally.
		const String *str = read_string(ctx);
		Symbol *sym = lookup_sym_noyyerror(ctx, str);
		Struct *super = dynamic_cast<Struct *>(sym);

		if (!super) {
			fprintf(stderr, "\"%s\" is not a valid object "
			        "(\"%s\" is not a struct).\n",
			        ctx.filename, str->c_str());
			throw UserError();
		}

		s->super = super;

		// A non-virtual struct cannot extend a virtual one.
		if (super->is_virtual() && !s->is_virtual()) {
			fprintf(stderr, "\"%s\" is not a valid object "
			        "(not virtual but parent is).\n",
			        ctx.filename);
			throw UserError();
		}
	}

	// add_elem() must have accounted for every entry we read.
	assert(num_entries == s->def.num_entries);
	return s;
}
+
+// OPT: Importing elements isn't necessary for idlc to work (at
+// least, not unless inheritance is implemented); it could be made
+// optional to speed up large compilations.
+
+BitField *BitField::import(ImportContext &ctx)
+{
+ if (!ctx.is_dir) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(a BitField must be a directory).\n",
+ ctx.filename);
+ throw UserError();
+ }
+
+ CompiledBitField def;
+ if (fread(&def, sizeof(def), 1, ctx.f) != 1)
+ throw IOError(ctx.f);
+
+ BitField *bf = new BitField(ctx.name);
+ bf->path = new String(ctx.filename);
+ ctx.parent->add_import(bf, ctx.filename);
+
+ int32_t num_entries = swap32(def.num_entries, ctx.swap);
+ bf->def.bits = swap32(def.bits, ctx.swap);
+
+ if (num_entries < 0 || num_entries > 4096) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(unreasonable num_entries %d).\n",
+ ctx.filename, num_entries);
+ throw UserError();
+ }
+
+ // FIXME: bits can only be 16, 32, or 64 when not in another bitfield.
+
+ if (bf->def.bits < 1 || bf->def.bits > 64) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(unreasonable bits %d).\n",
+ ctx.filename, num_entries);
+ throw UserError();
+ }
+
+ for (int32_t i = 0; i < num_entries; i++) {
+ const String *str = read_string(ctx);
+ Symbol *sym = lookup_sym_in_ns_noyyerror(ctx, bf, str);
+ Datum *d = dynamic_cast<Datum *>(sym);
+
+ if (!d) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(\"%s\" is not a datum).\n",
+ ctx.filename, str->c_str());
+ throw UserError();
+ }
+
+ bf->add_elem(d);
+ }
+
+ assert(num_entries == bf->def.num_entries);
+ return bf;
+}
+
+// OPT: Importing elements isn't necessary for idlc to work (at
+// least, not unless inheritance is implemented); it could be made
+// optional to speed up large compilations.
+
+Enum *Enum::import(ImportContext &ctx)
+{
+ if (!ctx.is_dir) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(a Enum must be a directory).\n",
+ ctx.filename);
+ throw UserError();
+ }
+
+ CompiledEnum def;
+ if (fread(&def, sizeof(def), 1, ctx.f) != 1)
+ throw IOError(ctx.f);
+
+ Enum *e = new Enum(ctx.name);
+ e->path = new String(ctx.filename);
+ ctx.parent->add_import(e, ctx.filename);
+
+ int32_t num_entries = swap32(def.num_entries, ctx.swap);
+ e->def.bits = swap32(def.bits, ctx.swap);
+
+ if (num_entries < 0 || num_entries > 4096) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(unreasonable num_entries %d).\n",
+ ctx.filename, num_entries);
+ throw UserError();
+ }
+
+ // FIXME: bits can only be 16, 32, or 64 when not in another bitfield.
+
+ if (e->def.bits < 1 || e->def.bits > 64) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(unreasonable bits %d).\n",
+ ctx.filename, num_entries);
+ throw UserError();
+ }
+
+ for (int32_t i = 0; i < num_entries; i++) {
+ const String *str = read_string(ctx);
+ Symbol *sym = lookup_sym_in_ns_noyyerror(ctx, e, str);
+ Datum *d = dynamic_cast<Datum *>(sym);
+
+ if (!d) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(\"%s\" is not a datum).\n",
+ ctx.filename, str->c_str());
+ throw UserError();
+ }
+
+ e->add_elem(d);
+ }
+
+ assert(num_entries == e->def.num_entries);
+ return e;
+}
+
+Alias *Alias::import(ImportContext &ctx)
+{
+ if (ctx.is_dir) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(an Alias must be a file).\n",
+ ctx.filename);
+ throw UserError();
+ }
+
+ Alias *a = new Alias(ctx.name);
+ ctx.parent->add_import(a, ctx.filename);
+
+ const String *str = read_string(ctx);
+ Symbol *sym = lookup_sym_noyyerror(ctx, str);
+
+ Alias *aptr = dynamic_cast<Alias *>(sym);
+ if (aptr) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(points to \"%s\", which is an alias).\n",
+ ctx.filename, str->c_str());
+ throw UserError();
+ }
+
+ a->real_sym = sym;
+ a->sym_fq_name = str;
+ a->def.length = str->length();
+
+ return a;
+}
+
+TypeDef *TypeDef::import(ImportContext &ctx)
+{
+ if (ctx.is_dir) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(a TypeDef must be a file).\n",
+ ctx.filename);
+ throw UserError();
+ }
+
+ TypeDef *td = new TypeDef(ctx.name);
+ ctx.parent->add_import(td, ctx.filename);
+
+ const String *str = read_string(ctx);
+ Symbol *sym = lookup_sym_noyyerror(ctx, str);
+
+ Alias *aptr = dynamic_cast<Alias *>(sym);
+ if (aptr) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(points to \"%s\", which is an alias).\n",
+ ctx.filename, str->c_str());
+ throw UserError();
+ }
+
+ td->real_sym = sym;
+ td->sym_fq_name = str;
+ td->def.length = str->length();
+
+ return td;
+}
+
+UserNameSpace *UserNameSpace::import(ImportContext &ctx)
+{
+ if (!ctx.is_dir) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(a NameSpace must be a directory).\n",
+ ctx.filename);
+ throw UserError();
+ }
+
+ UserNameSpace *uns = new UserNameSpace();
+ uns->path = new String(ctx.filename);
+
+ // FIXME: sanity check the mount point
+
+ uns->name = ctx.name;
+ ctx.parent->add_import(uns, ctx.filename);
+ return uns;
+}
+
+Symbol *do_load(ImportContext &ctx)
+{
+ int type = swap32(ctx.hdr.type, ctx.swap);
+
+ switch (type) {
+ case CompiledDefHeader::NameSpace:
+ return UserNameSpace::import(ctx);
+ break;
+
+ case CompiledDefHeader::BasicType:
+ return BasicType::import(ctx);
+ break;
+
+ case CompiledDefHeader::Datum:
+ return Datum::import(ctx);
+ break;
+
+ case CompiledDefHeader::Interface:
+ return Interface::import(ctx);
+ break;
+
+ case CompiledDefHeader::Method:
+ return Method::import(ctx);
+ break;
+
+ case CompiledDefHeader::Param:
+ return Param::import(ctx);
+ break;
+
+ case CompiledDefHeader::Struct:
+ return Struct::import(ctx);
+ break;
+
+ case CompiledDefHeader::Enum:
+ return Enum::import(ctx);
+ break;
+
+ case CompiledDefHeader::BitField:
+ return BitField::import(ctx);
+ break;
+
+ case CompiledDefHeader::Alias:
+ ctx.is_typedef = false;
+ return Alias::import(ctx);
+ break;
+
+ case CompiledDefHeader::TypeDef:
+ ctx.is_typedef = true;
+ return TypeDef::import(ctx);
+ break;
+
+ default:
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(bad type %08x).\n",
+ ctx.filename, type);
+
+ throw UserError();
+ }
+}
+
// Load "symname" from this namespace's backing directory.  The mangled
// name is tried first as a directory (reading its ".self" metadata
// file), then as a plain file.  Returns the imported Symbol, NULL if
// nothing exists on disk under that name, and throws UserError on a
// corrupt or unreadable object.
Symbol *NameSpace::load(const String *symname)
{
	ImportContext ctx;
	const String *mangled = mangle(symname);

	ctx.name = new String(*symname);
	ctx.is_dir = true;
	ctx.parent = this;

	// NOTE: ctx.filename points into filename_no_self's buffer, so
	// this local must stay alive and unmodified for the rest of the
	// function.
	string filename_no_self(path);
	filename_no_self += '/';
	filename_no_self += *mangled;
	ctx.filename = filename_no_self.c_str();

	string filename(filename_no_self);
	filename.append("/.self");

	// Prefer the directory form; fall back to a plain file.
	ctx.f = fopen(filename.c_str(), "rb");
	if (!ctx.f) {
		ctx.is_dir = false;
		ctx.f = fopen(ctx.filename, "rb");

		if (!ctx.f)
			return NULL;

		filename = filename_no_self;
	}

	Symbol *ret;
	try {
		if (fread(&ctx.hdr, sizeof(ctx.hdr), 1, ctx.f) != 1)
			throw IOError(ctx.f);

		// The magic number doubles as a byte-order detector.
		if (ctx.hdr.magic == CompiledDefHeader::magic_normal)
			ctx.swap = false;
		else if (ctx.hdr.magic == CompiledDefHeader::magic_reversed)
			ctx.swap = true;
		else {
			fprintf(stderr, "\"%s\" is not a valid object "
			        "(bad magic 0x%08x).\n",
			        ctx.filename, ctx.hdr.magic);

			throw UserError();
		}

		ret = do_load(ctx);
	}

	catch (IOError &e) {
		fprintf(stderr, "Cannot read from file \"%s\": %s.\n",
		        ctx.filename, e.msg);

		throw UserError();
	}

	return ret;
}
+
+NameSpace *check_for_imports(NameSpace *ns)
+{
+ while (ns) {
+ if (ns->get_path())
+ return ns;
+
+ ns = ns->get_ns();
+ }
+
+ return NULL;
+}
+
+void UserNameSpace::declare_import(const char *path)
+{
+ ImportContext ctx;
+ UserNameSpace *ns = new UserNameSpace();
+ string filename(path);
+ filename.append("/.self");
+
+ ctx.parent = NULL;
+ ctx.filename = path;
+
+ ctx.f = fopen(filename.c_str(), "rb");
+ if (!ctx.f) {
+ fprintf(stderr, "Cannot import \"%s\": %s.\n",
+ path, strerror(errno));
+
+ throw UserError();
+ }
+
+ const String *mount;
+ try {
+ if (fread(&ctx.hdr, sizeof(ctx.hdr), 1, ctx.f) != 1)
+ throw IOError(ctx.f);
+
+ if (ctx.hdr.magic == CompiledDefHeader::magic_normal)
+ ctx.swap = false;
+ else if (ctx.hdr.magic == CompiledDefHeader::magic_reversed)
+ ctx.swap = true;
+ else {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(bad magic 0x%08x).\n",
+ ctx.filename, ctx.hdr.magic);
+
+ throw UserError();
+ }
+
+ mount = read_string(ctx);
+ }
+
+ catch (IOError &e) {
+ fprintf(stderr, "Cannot read from file \"%s\": %s.\n",
+ ctx.filename, e.msg);
+
+ throw UserError();
+ }
+
+ StrList *strl = new StrList(mount);
+ if (!strl) {
+ fprintf(stderr, "\"%s\" is not a valid object "
+ "(mount point \"%s\" is not valid).\n",
+ ctx.filename, mount->c_str());
+
+ throw UserError();
+ }
+
+ ns->name = strl->back();
+ strl->pop_back();
+
+ if (strl->size() != 0) {
+ ctx.parent = add_nspace(strl, false);
+
+ if (!ctx.parent)
+ BUG();
+
+ NameSpace *conflict = check_for_imports(ctx.parent);
+ if (conflict) {
+ fprintf(stderr, "Import \"%s\" conflicts"
+ " with \"%s\" at \"%s\".\n",
+ path, conflict->get_fq_name()->flatten()->c_str(),
+ conflict->get_path()->c_str());
+
+ throw UserError();
+ }
+ } else {
+ ctx.parent = toplevel;
+ }
+
+ ns->path = new String(path);
+ ctx.parent->add_import(ns, ctx.filename);
+}
+
+void NameSpace::import_all()
+{
+ if (!path)
+ return;
+
+ DIR *dir = opendir(path->c_str());
+ if (!dir) {
+ fprintf(stderr, "Cannot open directory \"%s\".\n", path->c_str());
+ throw UserError();
+ }
+
+ struct dirent ent, *entp;
+ while (true) {
+ // readdir_r is buggy on osx 10.2, and will fail if errno is
+ // non-zero.
+ errno = 0;
+
+ if (readdir_r(dir, &ent, &entp)) {
+ fprintf(stderr, "1 Cannot readdir on \"%s\": %s.\n",
+ path->c_str(), strerror(errno));
+
+ closedir(dir);
+ throw UserError();
+ }
+
+ if (!entp)
+ break;
+
+ // Ignore ".", "..", and ".self".
+ if (ent.d_name[0] == '.')
+ continue;
+
+ try {
+ char *under = strchr(ent.d_name, '_');
+ if (!under) {
+ fprintf(stderr, "\"%s\" is not a valid namespace "
+ "(bad member \"%s\").\n",
+ path->c_str(), ent.d_name);
+
+ throw UserError();
+ }
+
+ String *s = new String(under + 1);
+ lookup(s);
+ }
+
+ catch (SymbolNotFound) {
+ fprintf(stderr, "\"%s\" is not a valid namespace "
+ "(member \"%s\" disappeared).\n",
+ path->c_str(), ent.d_name);
+
+ closedir(dir);
+ throw UserError();
+ }
+ }
+
+ closedir(dir);
+}
+
+void NameSpace::import_all_recursive()
+{
+ import_all();
+
+ for (const_iterator i = begin(); i != end(); ++i) {
+ Symbol *sym = (*i).second;
+ NameSpace *ns = dynamic_cast<NameSpace *>(sym);
+ if (ns)
+ ns->import_all_recursive();
+ }
+}
--- /dev/null
+/* lang.h -- Definitions used with language bindings
+ *
+ * Written by Scott Wood <scott@buserror.net>
+ */
+
+#ifndef IDLC_LANG_H
+#define IDLC_LANG_H
+
+#include <idlc.h>
+
+// A subclass of this interface is passed to Symbol::output_lang(),
+// which calls the appropriate type-specific method here. If the
+// sym is a Datum, Method, or Param, no callback is made; the language
+// binding is expected to handle it when iterating over the containing
+// type's element list.
+
class LangCallback {
public:
	virtual ~LangCallback()
	{
	}

	// One overload per concrete symbol kind; Symbol::output_lang()
	// invokes the matching one.  The meaning of arg1/arg2 is defined
	// by the language binding (e.g. CPPFile passes its output pass
	// number as arg1 — see c++.h).
	virtual void output(UserNameSpace *sym, int arg1, void *arg2) = 0;
	virtual void output(Struct *sym, int arg1, void *arg2) = 0;
	virtual void output(Interface *sym, int arg1, void *arg2) = 0;
	virtual void output(BitField *sym, int arg1, void *arg2) = 0;
	virtual void output(Enum *sym, int arg1, void *arg2) = 0;
	virtual void output(BasicType *sym, int arg1, void *arg2) = 0;
	virtual void output(Alias *sym, int arg1, void *arg2) = 0;
	virtual void output(TypeDef *sym, int arg1, void *arg2) = 0;
	virtual void output(Datum *sym, int arg1, void *arg2) = 0; // const only
};
+
// Base class for a language binding.  Each binding links itself onto
// the global "first_lang" list (see CPPBinding's constructor in
// c++.h).
class Language {
public:
	const char *name;   // human-readable binding name, e.g. "C++"
	Language *next;     // next binding on the global list

	virtual ~Language()
	{
	}

	// Emit caller-side (output_root) or server-side (output_server)
	// code for everything under namespace "ns" into directory "dir".
	virtual void output_root(UserNameSpace *ns, const char *dir) = 0;
	virtual void output_server(UserNameSpace *ns, const char *dir) = 0;
};
+
+extern Language *first_lang;
+
+#endif
--- /dev/null
# Build fragment for the C++ language binding.  DIR carries a trailing
# slash so the $(DIR)% substitution below forms valid paths.
DIR := languages/c++/
DIRS += $(DIR)

# Source basenames (presumably without their file suffix — the pattern
# only prepends the directory).  BUILDCXXFILES aggregates them for the
# enclosing build.
RAW_CXXFILES := main interface-caller server
BUILDCXXFILES += $(RAW_CXXFILES:%=$(DIR)%)
--- /dev/null
+#ifndef IDLC_CPP_H
+#define IDLC_CPP_H
+
+#include <fstream>
+#include <list>
+
+#include <lang.h>
+#include <util.h>
+
+using std::ofstream;
+using std::ostream;
+
+// Output the namespace-qualified symbol name, including
+// user namespaces. A prefix for the final component (i.e. the
+// actual name) can optionally be specified.
+
+void cpp_output_name(ostream &file, Symbol *sym, const char *prefix = "");
+
+// Output the C++ type, not including the array, including
+// a trailing space.
+
+void cpp_output_type(ostream &file, Type *t, bool array, bool is_mutable);
+void cpp_output_type(ostream &file, CompiledBasicType &t, bool is_mutable);
+
+// Output the param's fully-qualified type, a space, and the name
+// of the param.
+
+void cpp_output_one_param(ostream &file, Param *p, bool is_server,
+ bool is_copy = false);
+
+// Each IDL namespace is mapped to a C++ header file,
+// represented by a CPPFile.
+
+class CPPFile : public LangCallback {
+ Indent indent;
+ string dirname;
+ ofstream file;
+ UserNameSpace *ns;
+ StrList *fqname;
+
+ //// Interface caller-side members:
+
+ // Outputs the body of the internal _i_<ifacename> struct,
+ // containing a vtable pointer (and definition).
+
+ void output_internal(Interface *sym);
+
+ // Outputs the user-visible class wrapper that is used to
+ // allow implicit upcasts.
+
+ void output_wrapper(Interface *sym);
+
+ // Output implementations of the cast methods prototyped in the wrapper.
+ // The implementation is delayed to avoid circular dependency problems.
+
+ void output_casts(Interface *sym);
+
+ // Output the downcast and implicit upcast methods for
+ // the given interface/superinterface pair.
+
+ static void wrapper_cast_callback(Interface *iface, void *arg);
+ void output_downcast(Interface *iface, Interface *super);
+ void output_upcast(Interface *iface, Interface *super);
+
+ static void wrapper_cast_proto_callback(Interface *iface, void *arg);
+ void output_downcast_proto(Interface *iface, Interface *super);
+ void output_upcast_proto(Interface *iface, Interface *super);
+
+ static void wrapper_method_proto_callback(Interface *iface, void *arg);
+ static void wrapper_method_callback(Interface *iface, void *arg);
+
+ void output_iface_tbl(Interface *iface);
+ static void tbl_callback(Interface *iface, void *arg);
+ void output_iface_tbl_entry(Interface *iface, Interface *super);
+
+ static void output_method_ptrs_callback(Interface *iface, void *arg);
+ void output_method_ptrs(Interface *iface);
+ void output_methods(Interface *iface, Interface *super, bool prototype);
+ void output_method_defs(Interface *iface);
+
+ void CPPFile::output_one_method(Interface *iface, Method *m,
+ bool prototype, bool retval);
+ void CPPFile::output_one_method_ptr(Method *m, Interface *iface);
+
+ static void output_iface_ns(CPPFile *file, NameSpace *sym);
+
+ //// Misc members:
+
+ // Output the static const guid[] value.
+
+ void output_guid(const uint64_t *guid);
+
+ // Output a datum in the given struct, along with any necessary
+ // padding. Return the offset of the next datum.
+
+ int output_datum(Struct *ns, Datum *d, int offset);
+
+ static void output_vstruct_ns(CPPFile *file, NameSpace *sym);
+ void output_vstruct_info(Struct *sym);
+ void output_vstruct_main(Struct *sym);
+ void output_struct_ctor(Struct *sym, bool extra_vstruct);
+ int output_struct_ctor_rec1(Struct *sym, int num);
+ int output_struct_ctor_rec2(Struct *sym, int num);
+
+ void output_bf_elem(Datum *d, int pos, int bits, string &prefix);
+ void output_bf(BitField *bf, int bits, int typebits, string &prefix);
+
+ // Call ns_in on the symbol in order to declare the containing
+ // namespace. Call ns_out when finished. ns_out also takes care
+ // of inserting the extra newline separating declarations. The
+ // "extra" parameter allows extra output passes to have unique
+ // #ifndef names, and should have a trailing underscore.
+
+ void ns_in(Symbol *sym, const char *extra = "DEF_");
+ void ns_out(Symbol *sym);
+
+ // As above, but also outputs non-user namespaces (optionally
+ // including "sym" itself).
+
+ void all_ns_in(Symbol *sym, bool include_self = false,
+ const char *extra = "DEF_");
+ void all_ns_out(Symbol *sym, bool include_self = false);
+
+ // Output a protective #ifndef/#define block to keep a type
+ // from being declared more than once. Call ifndef_out()
+ // for the #endif.
+
+ void ifndef_in(Symbol *sym, const char *extra = "DEF_");
+ void ifndef_out();
+
+ // Return the namespace-qualified symbol name, excluding
+ // user namespaces. This is used when defining a name
+ // which (if it is nested) has been previously forward
+ // declared. A prefix for the final component (i.e. the
+ // actual name) can optionally be specified.
+
+ String &get_definition_name(Symbol *sym, const char *prefix = "");
+
+ // Output the _ns declaration of a struct or interface.
+ // If a callback is supplied, it is called inside the
+ // namespace, before any namespace members have been
+ // emitted.
+
+ typedef void (*nsdecl_callback)(CPPFile *file, NameSpace *ns);
+ void output_nsdecl(NameSpace *ns, nsdecl_callback cb = NULL);
+
+ void output_aliases_and_types(NameSpace *ns);
+
+ // When a top-level struct or interface is about to be generated,
+ // it is scanned for other types which need to be generated. If the
+ // full definition is needed, it goes on the need_to_declare list;
+ // otherwise, it goes on the need_to_forward_declare list. When
+ // processing the lists, if a node has already been marked as
+ // traversed, it is skipped.
+
+ // Full declaration is needed for superstructs, superinterfaces,
+ // inline structs (once implemented) used, and typedefs used.
+ // It is also necessary to fully declare any type which contains
+ // a type whose forward declaration is needed, as such types
+ // cannot (as far as I know) be forward-declared.
+
+ std::list<Symbol *> need_to_declare;
+
+ // Forward declaration is needed for types used via a pointer only.
+
+ std::list<Symbol *> need_to_forward_declare;
+
+ void declare_dependencies(Struct *sym);
+ void declare_dependencies(Interface *iface, bool need_obj_def = false);
+ void declare_type_dependency(Type *t, bool need_obj_def = false);
+
+ // This is the traversal of the first namespace; any symbol
+ // whose last traversal is at least this has been written to
+ // at least one of the included namespaces (and/or the
+ // current namespace).
+
+ int first_traversal;
+
+ // The traversal indices are also passed as "arg1" to determine
+ // which pass to generate. Higher numbers are performed first.
+
+ enum {
+ trav_full = 0, // Full, final output
+ trav_obj_def = 1, // Object struct definition
+ trav_obj_stub = 2, // Object pointer stub,
+ // so it can be used in structs.
+ trav_nsdecl = 3, // _ns declaration
+ trav_forward = 4, // Forward declaration of
+ // structs/interfaces in nsdecl
+ };
+
+ bool pass_needed(Symbol *sym, int pass)
+ {
+ return first_traversal > sym->traversed[pass];
+ }
+
+ void output_pass(Symbol *sym, int pass)
+ {
+ if (pass_needed(sym, pass)) {
+ sym->traversed[pass] = traversal;
+ sym->output_lang(this, pass);
+ }
+ }
+
+ bool do_extra_newline;
+
+ void downscope()
+ {
+ indent.indent_level++;
+ do_extra_newline = false;
+ }
+
+ void upscope()
+ {
+ indent.indent_level--;
+ do_extra_newline = true;
+ }
+
+ void extra_newline()
+ {
+ if (do_extra_newline)
+ file << "\n";
+
+ do_extra_newline = true;
+ }
+
+public:
+ CPPFile(UserNameSpace *ns, const char *dir);
+ virtual ~CPPFile();
+
+ void output(UserNameSpace *sym, int pass = trav_full, void *arg2 = NULL);
+ void output(Struct *sym, int pass = trav_full, void *arg2 = NULL);
+ void output(Interface *sym, int pass = trav_full, void *arg2 = NULL);
+ void output(BitField *sym, int pass = trav_full, void *arg2 = NULL);
+ void output(Enum *sym, int pass = trav_full, void *arg2 = NULL);
+ void output(BasicType *sym, int pass = trav_full, void *arg2 = NULL);
+ void output(Alias *sym, int pass = trav_full, void *arg2 = NULL);
+ void output(TypeDef *sym, int pass = trav_full, void *arg2 = NULL);
+ void output(Datum *sym, int pass = trav_full, void *arg2 = NULL);
+};
+
// The C++ language binding.  Constructing an instance registers it on
// the global singly-linked list headed by first_lang.
class CPPBinding : public Language {
public:
	CPPBinding()
	{
		name = "C++";
		// Push this binding onto the head of the list.
		next = first_lang;
		first_lang = this;
	}

	void output_root(UserNameSpace *ns, const char *dir);
	void output_server(UserNameSpace *ns, const char *dir);
};
+
+#endif
--- /dev/null
+// idlcomp/languages/c++/interface-caller.cc -- caller-side interface definitions
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <targets.h>
+#include "c++.h"
+
+// Context threaded through the for_each_super<> callbacks below.
+struct callback_data {
+	CPPFile *file;    // file being generated
+	Interface *iface; // interface whose bindings are being emitted
+	bool prototype;   // emit prototypes vs. definitions (not set by all users)
+	int count;        // scratch counter (not set by all users)
+};
+
+// Emit one slot of the interface table: an IFaceTable member named
+// after the flattened fully-qualified name of "super".
+// NOTE(review): "iface" is unused here; apparently kept for symmetry
+// with the callback signature.
+void CPPFile::output_iface_tbl_entry(Interface *iface, Interface *super)
+{
+	file << indent << "::System::RunTime::IFaceTable "
+	     << *super->get_fq_name()->flatten("_IDLNS_") << ";\n";
+}
+
+// for_each_super<> trampoline: forwards to output_iface_tbl_entry.
+void CPPFile::tbl_callback(Interface *super, void *arg)
+{
+	const callback_data *data = static_cast<const callback_data *>(arg);
+	data->file->output_iface_tbl_entry(data->iface, super);
+}
+
+// Emit the "iface_table" struct: one IFaceTable entry for the
+// interface itself, one per (transitive) superinterface, terminated
+// by an "_end" pointer member.
+void CPPFile::output_iface_tbl(Interface *iface)
+{
+	file << indent << "struct iface_table {\n";
+
+	indent.indent_level++;
+
+	output_iface_tbl_entry(iface, iface);
+
+	callback_data data = { this, iface };
+
+	iface->for_each_super<tbl_callback>(&data);
+	file << indent << "unsigned char *_end;\n";
+
+	indent.indent_level--;
+
+	file << indent << "} iface_table;\n\n";
+}
+
+// Emit the type and name of a single method parameter.
+//
+// A parameter is emitted as mutable when it is Shared or Push, or --
+// on the server side -- when it is Inline or passed by copy.  Out
+// parameters, and struct parameters that are not inline, are passed
+// through an extra level of pointer.
+void cpp_output_one_param(ostream &file, Param *p,
+                          bool is_server, bool is_copy)
+{
+	bool is_mutable = p->def.flags.field.Shared ||
+	                  p->def.flags.field.Push ||
+	                  (is_server &&
+	                   (p->def.flags.field.Inline ||
+	                    is_copy));
+
+	// Interfaces are emitted by name (wrapper type); other named
+	// types and basic types go through cpp_output_type.
+	if (dynamic_cast<Interface *>(*p->type)) {
+		cpp_output_name(file, p->type);
+		file << ' ';
+	} else if (p->type) {
+		cpp_output_type(file, p->type, p->is_array(), is_mutable);
+	} else {
+		cpp_output_type(file, p->def.basictype, is_mutable);
+	}
+
+	if (p->def.flags.field.Out)
+		file << '*';
+
+	if (dynamic_cast<Struct *>(*p->type) && !p->is_inline())
+		file << '*';
+
+	file << **p->name;
+}
+
+// Emit one method of an interface wrapper: either the in-class
+// prototype (prototype == true) or the out-of-line inline definition
+// that forwards through the method-pointer table (_ptr->info->methods).
+//
+// "retval" selects a variant with a return value; the plain variant
+// prints a "void" return type.  (The retval variant is not generated
+// yet -- see the commented-out call in output_methods.)
+void CPPFile::output_one_method(Interface *iface, Method *m,
+                                bool prototype, bool retval)
+{
+	const String &nsname = get_definition_name(iface);
+
+	file << '\n' << indent << "inline ";
+
+	if (!retval)
+		file << "void ";
+
+	// Reuse nsname rather than calling get_definition_name() again;
+	// each call computes (and heap-allocates) a fresh String.
+	if (!prototype)
+		file << nsname << "::";
+
+	file << **m->name << '(';
+
+	// Align parameter-list continuation lines just past the opening
+	// parenthesis; the offsets account for "inline ", "void ", and
+	// the "Class::" qualifier.
+	if (prototype)
+		indent.align_spaces = m->name->length() + 8;
+	else
+		indent.align_spaces = m->name->length() + nsname.length() + 10;
+
+	if (!retval)
+		indent.align_spaces += 5;
+
+	bool first = true;
+	for (Method::entries_iterator i = m->entries_begin();
+	     i != m->entries_end(); ++i)
+	{
+		if (!first)
+			file << ",\n" << indent;
+
+		first = false;
+
+		cpp_output_one_param(file, *i, false);
+	}
+
+	file << ")";
+	indent.align_spaces = 0;
+
+	if (prototype) {
+		file << ";\n";
+		return;
+	}
+
+	file << '\n' << indent << "{\n";
+	downscope();
+
+	// FIXME: It'd be nice to skip the NULL test in this cast.
+
+	file << indent << "_ptr->info->methods." << **m->name
+	     << "(static_cast< ";
+
+	cpp_output_name(file, m->get_ns());
+
+	file << ">(*this)._ptr";
+
+	indent.align_spaces = m->name->length() + 21;
+
+	for (Method::entries_iterator i = m->entries_begin();
+	     i != m->entries_end(); ++i)
+	{
+		Param *p = *i;
+		file << ",\n" << indent << **p->name;
+	}
+
+	file << ");\n";
+	indent.align_spaces = 0;
+
+	upscope();
+	file << indent << "}\n";
+}
+
+// Emit all methods declared directly by "super", as members of the
+// wrapper for "iface" (prototypes or definitions per "prototype").
+void CPPFile::output_methods(Interface *iface, Interface *super, bool prototype)
+{
+	for (Interface::methods_iterator m = super->methods_begin();
+	     m != super->methods_end(); ++m)
+	{
+		output_one_method(iface, *m, prototype, false);
+
+		/*
+		if (method has at least one out parameter)
+			output_one_method(iface, *m, prototype, true);
+		*/
+	}
+}
+
+// Emit one slot of the method-pointer table: a function pointer
+// taking "void *_this" followed by the method's parameters.
+// NOTE(review): "iface" is unused; kept so callbacks can pass their
+// context through.  The former unused local "super" has been removed.
+void CPPFile::output_one_method_ptr(Method *m, Interface *iface)
+{
+	extra_newline();
+
+	file << indent << "void (*" << **m->name << ")(void *_this";
+	indent.align_spaces = m->name->length() + 9;
+
+	for (Method::entries_iterator p = m->entries_begin();
+	     p != m->entries_end(); ++p)
+	{
+		file << ",\n" << indent;
+		cpp_output_one_param(file, *p, false);
+	}
+
+	file << ");\n";
+	indent.align_spaces = 0;
+}
+
+// for_each_super<> trampoline: emit a method pointer for every method
+// of one superinterface.
+void CPPFile::output_method_ptrs_callback(Interface *super, void *arg)
+{
+	const callback_data *data = static_cast<const callback_data *>(arg);
+
+	for (Interface::methods_iterator m = super->methods_begin();
+	     m != super->methods_end(); ++m)
+		data->file->output_one_method_ptr(*m, data->iface);
+}
+
+// Emit the "methods" struct: pointers for the interface's own methods
+// followed by those of all superinterfaces.
+void CPPFile::output_method_ptrs(Interface *iface)
+{
+	file << indent << "struct methods {\n";
+	downscope();
+
+	callback_data data = { this, iface };
+
+	output_method_ptrs_callback(iface, &data);
+	iface->for_each_super<output_method_ptrs_callback>(&data);
+
+	upscope();
+	file << indent << "} methods;\n";
+}
+
+// Emit the internal "_i_<name>" representation: the nested info_type
+// struct (parent info or, for System.Object, the root fields), the
+// iface_table and methods tables, the const info pointer, and a
+// constructor that stores it.
+void CPPFile::output_internal(Interface *iface)
+{
+	file << indent << "struct info_type {\n";
+
+	indent.indent_level++;
+	int super_count = 0;
+
+	if (iface->supers_empty()) {
+		// No supers.  Only System.Object may be super-less; it gets
+		// the top-level info_type fields.  Anything else is a user
+		// error.
+
+		if (iface != System_Object) {
+			yyerrorf("Interface \"%s\" has no superinterface and "
+			         "is not \"System.Object\".",
+			         iface->get_fq_name()->flatten()->c_str());
+
+			throw UserError();
+		}
+
+		file << indent << "ptrdiff_t concrete;\n"
+		     << indent << "::System::RunTime::IFaceTable "
+		        "*concrete_IFaceTable;\n";
+	} else {
+		// Embed the first super's info_type as "parent".
+		Interface *super = *iface->supers_begin();
+
+		file << indent;
+		cpp_output_name(file, super, "_i_");
+		file << "::info_type parent;\n";
+	}
+
+	file << '\n';
+
+	output_iface_tbl(iface);
+	output_method_ptrs(iface);
+
+	indent.indent_level--;
+
+	file << indent << "};\n\n"
+	     << indent << "const info_type *const info;\n";
+
+	// Constructor: _i_<name>(info_type *INFO) : info(INFO) {}
+	file << '\n' << indent << "_i_" << **iface->name << '(';
+	indent.align_spaces = iface->name->length() + 4;
+	cpp_output_name(file, iface, "_i_");
+
+	file << "::info_type *INFO) :\n"
+	     << indent << "info(INFO)\n";
+
+	indent.align_spaces = 0;
+	file << indent << "{}\n";
+}
+
+// Emit the in-class prototype for the static downcast() from a
+// superinterface wrapper to this interface's wrapper.
+void CPPFile::output_downcast_proto(Interface *iface, Interface *super)
+{
+	file << '\n' << indent << "static inline " << **iface->name << " downcast(";
+	cpp_output_type(file, super, false, false);
+	file << "oldptr);\n";
+}
+
+// Emit the out-of-line downcast() definition: reinterprets the old
+// pointer as ::System::_i_Object and goes through the runtime
+// downcast helper keyed on this interface's GUID.
+void CPPFile::output_downcast(Interface *iface, Interface *super)
+{
+	const String &name = get_definition_name(iface);
+	const String &iname = get_definition_name(iface, "_i_");
+
+	file << '\n' << indent << "inline " << name << ' '
+	     << name << "::downcast(";
+	cpp_output_type(file, super, false, false);
+	file << "oldptr)";
+
+	file << "\n" << indent << "{\n";
+
+	indent.indent_level++;
+
+	file << indent << "::System::_i_Object *_llptr = \n"
+	     << indent << "\treinterpret_cast< ::System::_i_Object *>(oldptr._ptr);\n"
+	     << indent << "return " << name << "(reinterpret_cast< "
+	     << iname << " *>\n"
+	     << indent << "\t(::System::RunTime::downcast(_llptr, "
+	     << name << "_ns::_guid.l)));\n";
+
+	indent.indent_level--;
+
+	file << indent << "}\n";
+}
+
+// Emit the in-class prototype of the implicit conversion to a
+// superinterface wrapper.
+void CPPFile::output_upcast_proto(Interface *iface, Interface *super)
+{
+	const String &supername = *super->get_fq_name("_ns")->flatten("::");
+	file << indent << "inline operator ::" << supername << "();\n";
+}
+
+// Emit the conversion-operator definition: adjust the raw pointer by
+// the super's offset from the iface_table, NULL-preserving.
+void CPPFile::output_upcast(Interface *iface, Interface *super)
+{
+	const String &supername = *super->get_fq_name("_ns")->flatten("::");
+
+	file << '\n' << indent << "inline "
+	     << get_definition_name(iface)
+	     << "::operator ::" << supername << "()\n"
+	     << indent << "{\n";
+
+	indent.indent_level++;
+
+	// A space is added after the final '<', as GCC gives a parse
+	// error otherwise.  I'm not sure exactly why...
+
+	file << indent << "uintptr_t ptr = reinterpret_cast<uintptr_t>(_ptr);\n\n"
+	     << indent << "if (!_ptr)\n"
+	     << indent << "\treturn NULL;\n\n"
+	     << indent << "ptr += _ptr->info->iface_table."
+	     << *super->get_fq_name()->flatten("_IDLNS_")
+	     << ".offset;\n"
+	     << indent << "return ::" << supername << "(reinterpret_cast< ";
+
+	cpp_output_name(file, super, "_i_");
+
+	file << " *>(ptr));\n";
+
+	indent.indent_level--;
+
+	file << indent << "}\n";
+}
+
+// for_each_super<> trampoline: emit cast definitions for one super.
+void CPPFile::wrapper_cast_callback(Interface *super, void *arg)
+{
+	const callback_data *data = static_cast<const callback_data *>(arg);
+
+	data->file->output_downcast(data->iface, super);
+	data->file->output_upcast(data->iface, super);
+}
+
+// for_each_super<> trampoline: emit cast prototypes for one super.
+void CPPFile::wrapper_cast_proto_callback(Interface *super, void *arg)
+{
+	const callback_data *data = static_cast<const callback_data *>(arg);
+
+	data->file->output_downcast_proto(data->iface, super);
+	data->file->output_upcast_proto(data->iface, super);
+}
+
+// Emit the body of the public wrapper struct: the _ptr member, the
+// default and from-pointer constructors, the raw-pointer conversion
+// operator, cast prototypes for all supers, and method prototypes for
+// the interface and all supers.
+void CPPFile::output_wrapper(Interface *iface)
+{
+	const String &name = **iface->name;
+
+	file << indent << "_i_" << name << " *_ptr;\n\n";
+
+	file << indent << name << "()\n"
+	     << indent << "{\n"
+	     << indent << "\t_ptr = NULL;\n"
+	     << indent << "}\n\n";
+
+	file << indent << name << "(_i_" << name << " *_other)\n"
+	     << indent << "{\n"
+	     << indent << "\t_ptr = _other;\n"
+	     << indent << "}\n\n";
+
+	file << indent << "operator _i_" << name << " *()\n"
+	     << indent << "{\n"
+	     << indent << "\treturn _ptr;\n"
+	     << indent << "}\n";
+
+	do_extra_newline = true;
+
+	// prototype = true: only declarations are emitted inside the
+	// struct; the definitions come later via output_method_defs.
+	callback_data data = { this, iface, true };
+
+	iface->for_each_super<wrapper_cast_proto_callback>(&data);
+
+	output_methods(iface, iface, true);
+	iface->for_each_super<wrapper_method_callback>(&data);
+}
+
+// Emit the out-of-line cast definitions for every superinterface.
+void CPPFile::output_casts(Interface *iface)
+{
+	callback_data data = { this, iface };
+
+	iface->for_each_super<wrapper_cast_callback>(&data);
+}
+
+// for_each_super<> trampoline: emit one super's methods, as
+// prototypes or definitions per data->prototype.
+void CPPFile::wrapper_method_callback(Interface *super, void *arg)
+{
+	const callback_data *data = static_cast<const callback_data *>(arg);
+
+	data->file->output_methods(data->iface, super, data->prototype);
+}
+
+// Emit the out-of-line method definitions for the interface itself
+// and all of its superinterfaces.
+void CPPFile::output_method_defs(Interface *iface)
+{
+	output_methods(iface, iface, false);
+
+	callback_data data = { this, iface, false };
+	iface->for_each_super<wrapper_method_callback>(&data);
+}
--- /dev/null
+// idlcomp/languages/c++/main.cc -- C++ IDL binding
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+// FIXME: escape C++ reserved words
+
+#include <cerrno>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <targets.h>
+#include "c++.h"
+
+CPPBinding cppbinding;
+
+// Generate the C++ binding header for one user namespace.
+//
+// Creates "<dir>/<ns>.h" plus a subdirectory "<dir>/<ns>/" for child
+// namespaces, writes the include-guard preamble, recursively
+// generates sub-namespace headers, then runs the nsdecl and full
+// passes over every non-namespace symbol.
+//
+// Throws UserError if the header file or directory cannot be created.
+CPPFile::CPPFile(UserNameSpace *NS, const char *dir) :
+dirname(dir)
+{
+	indent.indent_level = 0;
+	indent.align_spaces = 0;
+	do_extra_newline = true;
+	ns = NS;
+
+	dirname.append(1, '/');
+	dirname.append(ns->name);
+	string headername(dirname);
+	headername.append(".h");
+
+	file.open(headername.c_str());
+	if (!file.is_open()) {
+		fprintf(stderr, "Could not create output file \"%s\": %s.\n",
+		        headername.c_str(), strerror(errno));
+
+		throw UserError();
+	}
+
+	// Tolerate a directory left over from a previous run (EEXIST);
+	// any other failure is fatal.  Report the directory we actually
+	// tried to create (dirname), not the parent passed in as "dir".
+	if (mkdir(dirname.c_str(), 0777) < 0 && errno != EEXIST) {
+		fprintf(stderr, "Could not create output directory \"%s\": %s.\n",
+		        dirname.c_str(), strerror(errno));
+
+		throw UserError();
+	}
+
+	file << "// " << *ns->get_fq_name()->flatten(".")
+	     << "\n// This is a C++ language binding generated by idlc.\n"
+	     << "// Do not modify this file directly.\n\n";
+
+	fqname = ns->get_fq_name();
+	String *fqname_flat = ns->get_fq_name()->flatten("_IDLNS_");
+	file << "#ifndef IDL_HDR_" << *fqname_flat
+	     << "\n#define IDL_HDR_" << *fqname_flat << "\n\n";
+
+	file << "#include <orb.h>\n";
+
+	// Claim a fresh traversal number for this file; all symbols
+	// stamped before this point still need output here.
+	first_traversal = ++traversal;
+	assert(first_traversal >= 0);
+
+	for (NameSpace::const_iterator i = ns->begin(); i != ns->end(); ++i) {
+		Symbol *sym = (*i).second;
+		UserNameSpace *uns = dynamic_cast<UserNameSpace *>(sym);
+
+		if (uns) {
+			file << "#include \"" << **ns->name << '/' << **uns->name << ".h\"\n";
+
+			// Process namespaces first, to minimize duplicate definitions
+			// if this namespace depends on something declared in a sub
+			// namespace.
+
+			output(uns);
+		}
+	}
+
+	// _ns declarations for everything that is not a sub-namespace...
+	for (NameSpace::const_iterator i = ns->begin(); i != ns->end(); ++i) {
+		Symbol *sym = (*i).second;
+		UserNameSpace *uns = dynamic_cast<UserNameSpace *>(sym);
+
+		if (!uns)
+			output_pass(sym, trav_nsdecl);
+	}
+
+	// ...followed by the full definitions.
+	for (NameSpace::const_iterator i = ns->begin(); i != ns->end(); ++i) {
+		Symbol *sym = (*i).second;
+		UserNameSpace *uns = dynamic_cast<UserNameSpace *>(sym);
+
+		if (!uns)
+			output_pass(sym, trav_full);
+	}
+}
+
+// Close out the include guard.  All scopes must have been balanced by
+// the time the file is finished.
+CPPFile::~CPPFile()
+{
+	file << "#endif\n";
+
+	assert(indent.indent_level == 0);
+}
+
+// Open a per-symbol duplicate-suppression guard: since a symbol can
+// be emitted by several headers, each emission is wrapped in
+// #ifndef IDL_DUP_<extra><flattened-name>.
+void CPPFile::ifndef_in(Symbol *sym, const char *extra)
+{
+	StrList *fqname = sym->get_fq_name();
+	String *fqname_flat = fqname->flatten("_IDLNS_");
+
+	assert(indent.indent_level == 0);
+
+	file << "\n#ifndef IDL_DUP_" << extra << *fqname_flat
+	     << "\n#define IDL_DUP_" << extra << *fqname_flat << "\n";
+}
+
+// Close the guard opened by ifndef_in().
+void CPPFile::ifndef_out()
+{
+	file << "\n#endif\n";
+}
+
+void CPPFile::ns_in(Symbol *sym, const char *extra)
+{
+	// Only output user namespaces here; if we're defining a nested type,
+	// it will have been forward declared already, and thus the
+	// non-user-namespaces can be specified directly in the definition.
+	//
+	// For non-forward-declarables such as metadata, use all_ns_in.
+
+	StrList *ns_fqname = sym->find_toplevel_type()->get_ns()->get_fq_name();
+
+	ifndef_in(sym, extra);
+
+	for (StrList::const_iterator i = ns_fqname->begin();
+	     i != ns_fqname->end(); ++i) {
+		const String *str = *i;
+		file << "namespace " << *str << " {\n";
+	}
+
+	downscope();
+}
+
+// Close every namespace opened by ns_in() and the duplicate guard.
+void CPPFile::ns_out(Symbol *sym)
+{
+	upscope();
+	assert(indent.indent_level == 0);
+
+	for (Symbol *s = sym->find_toplevel_type()->get_ns(); s != toplevel;
+	     s = s->get_ns())
+		file << "}";
+
+	ifndef_out();
+}
+
+// Like ns_in(), but also opens the synthetic "<name>_ns" namespaces
+// for every enclosing non-user scope down to (and, if include_self,
+// including) the symbol itself.  Used for content that cannot be
+// forward declared, e.g. metadata.
+void CPPFile::all_ns_in(Symbol *sym, bool include_self, const char *extra)
+{
+	NameSpace *ns = sym->find_toplevel_type()->get_ns();
+	StrList *ns_fqname = ns->get_fq_name();
+
+	ifndef_in(sym, extra);
+
+	for (StrList::const_iterator i = ns_fqname->begin();
+	     i != ns_fqname->end(); ++i) {
+		const String *str = *i;
+		file << "namespace " << *str << " {\n";
+	}
+
+	// Collect the chain from the symbol up to the user namespace,
+	// then unwind it so the _ns namespaces open outermost-first.
+	stack<SymbolRef> typens;
+
+	if (!include_self)
+		sym = sym->get_ns();
+
+	for (Symbol *s = sym; s != ns; s = s->get_ns())
+		typens.push(s);
+
+	while (!typens.empty()) {
+		Symbol *s = typens.top();
+		typens.pop();
+		file << "namespace " << **s->name << "_ns {\n";
+	}
+
+	downscope();
+}
+
+// Close everything opened by all_ns_in() (same include_self value
+// must be passed).
+void CPPFile::all_ns_out(Symbol *sym, bool include_self)
+{
+	upscope();
+	assert(indent.indent_level == 0);
+
+	if (!include_self)
+		sym = sym->get_ns();
+
+	for (Symbol *s = sym; s != toplevel; s = s->get_ns())
+		file << "}";
+
+	ifndef_out();
+}
+
+// Build the name used to define "sym": for a symbol nested in a
+// non-user namespace, prepend the chain of "<outer>_ns::" scopes;
+// then the optional prefix and the symbol's own name.
+// NOTE(review): returns a reference to a heap-allocated String that
+// is never freed by callers -- appears to be deliberately leaked;
+// confirm against the project's String ownership conventions.
+String &CPPFile::get_definition_name(Symbol *sym, const char *prefix)
+{
+	NameSpace *ns = sym->get_ns();
+	UserNameSpace *uns = dynamic_cast<UserNameSpace *>(ns);
+	String *str;
+
+	if (!uns) {
+		str = &get_definition_name(ns);
+		str->append("_ns::");
+	} else {
+		str = new String();
+	}
+
+	str->append(prefix);
+	str->append(**sym->name);
+	return *str;
+}
+
+// Emit the fully qualified name of "sym" (with "_ns" suffixes on
+// non-user scopes), rooted at "::", with "prefix" on the last
+// component.
+void cpp_output_name(ostream &file, Symbol *sym, const char *prefix)
+{
+	StrList *sl = sym->get_fq_name("_ns");
+	sl->pop_back();
+
+	file << "::" << *sl->flatten("::") << "::" << prefix << **sym->name;
+}
+
+// Recurse into a sub-namespace: the temporary CPPFile writes the
+// whole header in its constructor/destructor, so it is created and
+// destroyed immediately.
+void CPPFile::output(UserNameSpace *sym, int pass, void *arg2)
+{
+	assert(indent.indent_level == 0);
+	delete new CPPFile(sym, dirname.c_str());
+}
+
+// Round a bit width (1..64) up to the nearest C fixed-width integer
+// size: 8, 16, 32, or 64.
+static inline int round_up_bits(int bits)
+{
+	assert(bits >= 1 && bits <= 64);
+
+	if (bits > 32)
+		return 64;
+	if (bits > 16)
+		return 32;
+	if (bits > 8)
+		return 16;
+
+	return 8;
+}
+
+// Emit a named type, optionally wrapped in the runtime's (Mutable)
+// Array template.  A trailing space is always printed so the caller
+// can append the declarator directly.
+// FIXME: Inline arrays...
+void cpp_output_type(ostream &file, Type *t, bool array, bool is_mutable)
+{
+	if (array) {
+		file << "::System::RunTime::";
+
+		if (is_mutable)
+			file << "Mutable";
+
+		file << "Array< ";
+	}
+
+	cpp_output_name(file, t);
+
+	if (array)
+		file << ">";
+
+	file << ' ';
+}
+
+// Emit a basic (built-in) type, optionally array-wrapped as above.
+// FIXME: Inline arrays...
+void cpp_output_type(ostream &file, CompiledBasicType &t, bool is_mutable)
+{
+	if (is_array(t)) {
+		file << "::System::RunTime::";
+
+		if (is_mutable)
+			file << "Mutable";
+
+		file << "Array< ";
+	}
+
+	if (t.flags.field.Bool) {
+		// Don't rely on C++ to provide any particular representation
+		// of bool...
+		file << "uint8_t";
+	} else if (t.flags.field.Float) {
+		if (t.bits == 32)
+			file << "float";
+		else
+			file << "double";
+	} else {
+		if (t.flags.field.Unsigned)
+			file << 'u';
+
+		file << "int" << round_up_bits(t.bits) << "_t";
+	}
+
+	if (is_array(t))
+		file << '>';
+
+	file << ' ';
+}
+
+// FIXME: implement padding
+int CPPFile::output_datum(Struct *ns, Datum *d, int offset)
+{
+ if (d->type) {
+ cpp_output_type(file, d->type, is_array(d->def.basictype),
+ !d->def.flags.field.Immutable);
+
+ Struct *dtype = dynamic_cast<Struct *>(*d->type);
+ if (dtype && !dtype->is_inline())
+ file << '*';
+ } else {
+ cpp_output_type(file, d->def.basictype,
+ !d->def.flags.field.Immutable);
+ }
+
+ return offset;
+}
+
+// Ensure the declarations "t" needs are emitted before its user:
+// the enclosing top-level type's _ns declaration, a forward
+// declaration if "t" is itself top-level, and then either the object
+// definition/stub (interfaces) or the full output (other types, when
+// a full definition is required).
+void CPPFile::declare_type_dependency(Type *t, bool need_obj_def)
+{
+	Symbol *toplevel_type = t->find_toplevel_type();
+
+	assert(indent.indent_level == 0);
+
+	output_pass(toplevel_type, trav_nsdecl);
+
+	if (t == toplevel_type)
+		output_pass(t, trav_forward);
+
+	if (dynamic_cast<Interface *>(t))
+		output_pass(t, need_obj_def ? trav_obj_def : trav_obj_stub);
+	else if (!dynamic_cast<Struct *>(t) || need_obj_def)
+		output_pass(t, trav_full);
+}
+
+// Declare everything an interface's methods and supers refer to.
+void CPPFile::declare_dependencies(Interface *iface, bool need_obj_def)
+{
+	for (Interface::methods_iterator i = iface->methods_begin();
+	     i != iface->methods_end(); ++i)
+	{
+		Method *m = *i;
+		for (Method::entries_iterator j = m->entries_begin();
+		     j != m->entries_end(); ++j)
+		{
+			Param *p = *j;
+			if (p->type)
+				declare_type_dependency(p->type, need_obj_def);
+		}
+	}
+
+	for (Interface::supers_iterator i = iface->supers_begin();
+	     i != iface->supers_end(); ++i)
+	{
+		Interface *super = *i;
+		declare_type_dependency(super);
+	}
+}
+
+// Declare everything a struct's members (including nested structs and
+// interfaces) refer to.  Inline members need a full definition.
+void CPPFile::declare_dependencies(Struct *str)
+{
+	for (NameSpace::const_iterator i = str->begin(); i != str->end(); ++i) {
+		Symbol *sym = (*i).second;
+		Datum *d = dynamic_cast<Datum *>(sym);
+		if (d) {
+			if (d->type)
+				declare_type_dependency(d->type, d->is_inline());
+
+			continue;
+		}
+
+		Struct *mstr = dynamic_cast<Struct *>(sym);
+		if (mstr) {
+			declare_dependencies(mstr);
+			continue;
+		}
+
+		Interface *miface = dynamic_cast<Interface *>(sym);
+		if (miface)
+			declare_dependencies(miface);
+	}
+}
+
+// Emit the 16-byte GUID as an anonymous union "_guid" giving both
+// byte-wise (c) and word-wise (l) access.
+// NOTE(review): "unsigned long l[];" is a flexible array member
+// inside a union -- a GCC extension; confirm all supported compilers
+// accept it.
+void CPPFile::output_guid(const uint64_t *guid64)
+{
+	const unsigned char *guid = reinterpret_cast<const unsigned char *>(guid64);
+	// "0x%02x, " is 6 characters plus the terminating NUL = 7.
+	char guidhex[7];
+
+	file << indent << "static const __attribute__((unused)) union {\n"
+	     << indent << "\tunsigned char c[16];\n"
+	     << indent << "\tunsigned long l[];\n"
+	     << indent << "} _guid = {\n"
+	     << indent << "\t{ ";
+
+	for (int i = 0; i < 16; i++) {
+		// Break the initializer into two rows of eight bytes.
+		if (i == 8)
+			file << '\n' << indent << "\t  ";
+
+		sprintf(guidhex, "0x%02x, ", *guid++);
+		file << guidhex;
+	}
+
+	file << "}\n"
+	     << indent << "};\n";
+
+	do_extra_newline = true;
+}
+
+// Emit the "<name>_ns" namespace for a struct/interface: optional
+// per-type content via "cb" (e.g. the GUID), then forward and _ns
+// declarations for every member.
+void CPPFile::output_nsdecl(NameSpace *ns, nsdecl_callback cb)
+{
+	// If indent level is not zero, this is a nested struct or interface.
+	if (indent.indent_level == 0)
+		ns_in(ns, "NS_");
+	else
+		extra_newline();
+
+	file << indent << "namespace " << **ns->name << "_ns {\n";
+	downscope();
+
+	if (cb)
+		cb(this, ns);
+
+	for (NameSpace::const_iterator i = ns->begin(); i != ns->end(); ++i) {
+		Symbol *sym = (*i).second;
+
+		// Members cannot have been emitted before their enclosing
+		// namespace declaration.
+		assert(pass_needed(sym, trav_forward));
+		assert(pass_needed(sym, trav_nsdecl));
+
+		// If it's a Method or a non-const Datum, this is a no-op.
+		output_pass(sym, trav_forward);
+		output_pass(sym, trav_nsdecl);
+	}
+
+	upscope();
+
+	file << indent << "}\n";
+
+	// Close the guard/namespaces only if we opened them above.
+	if (indent.indent_level == 1)
+		ns_out(ns);
+}
+
+// Fully emit every alias and nested type of a namespace.
+void CPPFile::output_aliases_and_types(NameSpace *ns)
+{
+	for (NameSpace::const_iterator i = ns->begin(); i != ns->end(); ++i) {
+		Symbol *sym = (*i).second;
+		if (dynamic_cast<Alias *>(sym) || dynamic_cast<Type *>(sym))
+			output_pass(sym, trav_full);
+	}
+}
+
+// nsdecl callback for virtual structs: emit the struct's GUID inside
+// its _ns namespace.
+void CPPFile::output_vstruct_ns(CPPFile *cpp, NameSpace *sym)
+{
+	Struct *str = dynamic_cast<Struct *>(sym);
+	assert(str);
+	assert(str->is_virtual());
+
+	cpp->output_guid(str->def.guid);
+}
+
+// Emit the VStructInfo metadata for a virtual struct: the _guids
+// array (root-first chain of super GUIDs, including this struct) and
+// the _info descriptor referencing it.  Also records the chain length
+// in sym->chainlen for later use by output_vstruct_main.
+void CPPFile::output_vstruct_info(Struct *sym)
+{
+	all_ns_in(sym, true, "VINFO_");
+
+	file << indent << "static const __attribute__((unused)) "
+	        "unsigned long *const _guids[] = {\n";
+
+	// Walk up the super chain, then unwind the stack so the root
+	// comes first in the array.
+	stack<StructRef> supers;
+	sym->chainlen = 0;
+
+	for (Struct *i = sym; i; i = i->get_super()) {
+		sym->chainlen++;
+		supers.push(i);
+	}
+
+	for (int i = 0; i < sym->chainlen; i++) {
+		Struct *super = supers.top();
+		supers.pop();
+
+		file << indent << '\t';
+		cpp_output_name(file, super);
+		file << "_ns::_guid.l,\n";
+	}
+
+	file << indent << "};\n\n"
+	     << indent << "static const __attribute__((unused)) "
+	        "::System::RunTime::VStructInfo _info = {\n"
+	     << indent << "\t_guids, " << sym->chainlen << '\n'
+	     << indent << "};\n";
+
+
+	all_ns_out(sym, true);
+}
+
+// Emit the virtual-struct boilerplate inside the struct body: the
+// _infoptr member (root only), a constructor that forwards the
+// VStructInfo pointer to the superclass (or stores it at the root),
+// and -- for non-root structs -- a GUID-checked static downcast()
+// from ::System::VStruct.  Relies on sym->chainlen having been set by
+// output_vstruct_info.
+void CPPFile::output_vstruct_main(Struct *sym)
+{
+	assert(sym->is_virtual());
+	Struct *super = sym->get_super();
+
+	const char *name = sym->name->c_str();
+
+	if (!super) {
+		// Only the root (System.VStruct) holds the info pointer.
+		assert(sym == System_VStruct);
+		file << indent << "const ::System::RunTime::VStructInfo "
+		        "*const _infoptr;\n\n";
+	}
+
+	file << indent << name
+	     << "(const ::System::RunTime::VStructInfo *realinfo = &"
+	     << name << "_ns::_info) :\n"
+	     << indent;
+
+	if (!super)
+		file << "_infoptr";
+	else
+		cpp_output_name(file, super);
+
+	file << "(realinfo)\n"
+	     << indent << "{\n"
+	     << indent << "}\n";
+
+	// downcast(): succeed iff the target's GUID appears at this
+	// struct's depth in the instance's GUID chain.
+	if (super)
+		file << '\n'
+		     << indent << "static " << name << " *downcast(::System::VStruct *base)\n"
+		     << indent << "{\n"
+		     << indent << "\tif (!base)\n"
+		     << indent << "\t\treturn NULL;\n\n"
+		     << indent << "\tconst ::System::RunTime::VStructInfo *info = base->_infoptr;\n\n"
+		     << indent << "\tif (info->chainlen < " << sym->chainlen << ")\n"
+		     << indent << "\t\treturn NULL;\n\n"
+		     << indent << "\tif (::System::RunTime::guids_equal(info->guids["
+		     << sym->chainlen - 1 << "], " << name << "_ns::_guid.l))\n"
+		     << indent << "\t\treturn static_cast<" << name << " *>(base);\n\n"
+		     << indent << "\treturn NULL;\n"
+		     << indent << "}\n";
+
+	do_extra_newline = true;
+}
+
+// Output an init method that initializes all the elements in the
+// struct; this is useful for throwing exceptions. Due to the
+// annoying, apparently unwaiveable-by-the-struct requirement that any
+// struct with a ctor be initialized using the ctor (and thus at
+// runtime, not statically), this can't be an actual ctor in
+// non-virtual structs.
+//
+// In virtual structs, there's already a ctor, so it wouldn't
+// matter. It'd generally be best to be consistent and use the
+// init method for both, but the main intended use for this is
+// throwing exceptions (which are virtual), and it'd be
+// unfortunate to cripple 99% of the uses with unnecessary
+// syntactic cruft. The init method will remain available
+// for vstructs so that things won't break if a non-vstruct
+// gets made virtual (or if a user really wants to be consistent
+// between both types in their own code).
+
+// Emit the struct's "_init" pseudo-constructor (see the rationale in
+// the comment block above), or -- when extra_vstruct is set -- a real
+// constructor for a virtual struct that additionally forwards the
+// VStructInfo pointer to the superclass (or stores it in _infoptr at
+// the root).  The parameter list covers every member of the struct
+// and all of its supers, root-first.
+// (The former local "method_name" was computed but never used, and
+// has been removed.)
+void CPPFile::output_struct_ctor(Struct *sym, bool extra_vstruct)
+{
+	file << '\n' << indent << **sym->name;
+	indent.align_spaces = sym->name->length() + 1;
+
+	if (!extra_vstruct) {
+		file << " &_init";
+		indent.align_spaces += 7;
+	}
+
+	file << '(';
+	output_struct_ctor_rec1(sym, 0);
+
+	if (extra_vstruct) {
+		Struct *super = sym->get_super();
+
+		file << ",\n" << indent
+		     << "const ::System::RunTime::VStructInfo *_realinfo = &"
+		     << **sym->name << "_ns::_info) :\n";
+
+		indent.align_spaces = 0;
+		file << indent;
+
+		if (!super)
+			file << "_infoptr";
+		else
+			cpp_output_name(file, super);
+
+		file << "(_realinfo";
+	}
+
+	indent.align_spaces = 0;
+	file << ")\n" << indent << "{\n";
+
+	indent.indent_level++;
+
+	output_struct_ctor_rec2(sym, 0);
+
+	// _init returns *this so initializations can be chained.
+	if (!extra_vstruct)
+		file << indent << "return *this;\n";
+
+	indent.indent_level--;
+	file << indent << "}\n";
+}
+
+// Recursively emit the parameter list for the ctor/_init: supers
+// first, then this struct's own members, each as "<type> _argN".
+// Returns the running parameter count.
+int CPPFile::output_struct_ctor_rec1(Struct *sym, int num)
+{
+	if (sym->get_super())
+		num = output_struct_ctor_rec1(sym->get_super(), num);
+
+	for (Struct::entries_iterator i = sym->entries_begin();
+	     i != sym->entries_end(); ++i)
+	{
+		if (num++)
+			file << ",\n" << indent;
+
+		output_datum(sym, *i, -1);
+		file << "_arg" << num;
+	}
+
+	return num;
+}
+
+// Recursively emit the ctor/_init body: assign each member from its
+// matching _argN, in the same order rec1 declared them.
+int CPPFile::output_struct_ctor_rec2(Struct *sym, int num)
+{
+	if (sym->get_super())
+		num = output_struct_ctor_rec2(sym->get_super(), num);
+
+	for (Struct::entries_iterator i = sym->entries_begin();
+	     i != sym->entries_end(); ++i)
+	{
+		Datum *d = *i;
+		file << indent << **d->name << " = _arg" << ++num << ";\n";
+	}
+
+	return num;
+}
+
+// Emit a struct for the requested pass:
+//   trav_nsdecl  - its _ns namespace (with GUID if virtual);
+//   trav_forward - "struct <name>;";
+//   trav_full    - the complete definition, after ensuring the super,
+//                  vstruct info, and member-type dependencies exist.
+void CPPFile::output(Struct *sym, int pass, void *arg2)
+{
+	switch (pass) {
+	case trav_nsdecl:
+		if (sym->is_virtual()) {
+			output_nsdecl(sym, output_vstruct_ns);
+		} else {
+			output_nsdecl(sym);
+		}
+
+		break;
+
+	case trav_forward: {
+		// Nested types are forward declared inline; top-level ones
+		// need their own guard and namespace wrapping.
+		bool nested = indent.indent_level != 0;
+
+		if (!nested)
+			ns_in(sym, "FWD_");
+		else
+			extra_newline();
+
+		file << indent << "struct " << **sym->name << ";\n";
+
+		if (!nested)
+			ns_out(sym);
+
+		break;
+	}
+
+	case trav_full: {
+		output_pass(sym, trav_nsdecl);
+		Struct *super = sym->get_super();
+
+		if (super)
+			output_pass(super, trav_full);
+
+		if (sym->is_virtual())
+			output_vstruct_info(sym);
+
+		declare_dependencies(sym);
+		ns_in(sym);
+
+		file << indent << "struct " << get_definition_name(sym);
+
+		if (super) {
+			const String *supername = super->get_fq_name("_ns")
+			                               ->flatten("::");
+			file << " :\n" << indent << "public ::" << *supername;
+		}
+
+		file << "\n" << indent << "{\n";
+		downscope();
+
+		if (sym->is_virtual())
+			output_vstruct_main(sym);
+
+		// Emit the members.  "offset" is threaded through for future
+		// padding support (currently unchanged by output_datum).
+		int offset = 0;
+		for (Struct::entries_iterator i = sym->entries_begin();
+		     i != sym->entries_end(); ++i)
+		{
+			extra_newline();
+			Datum *d = *i;
+			file << indent;
+			offset = output_datum(sym, d, offset);
+			file << **d->name << ";\n";
+		}
+
+		// Only generate ctors/_init if there is at least one member
+		// anywhere in the chain.
+		bool empty_struct = true;
+		for (Struct *s = sym; s; s = s->get_super()) {
+			if (s->entries_begin() != s->entries_end()) {
+				empty_struct = false;
+				break;
+			}
+		}
+
+		if (!empty_struct) {
+			output_struct_ctor(sym, false);
+
+			if (sym->is_virtual())
+				output_struct_ctor(sym, true);
+		}
+
+		upscope();
+		file << indent << "};\n";
+
+		ns_out(sym);
+
+		output_aliases_and_types(sym);
+
+		for (NameSpace::const_iterator i = sym->begin(); i != sym->end(); ++i) {
+			Symbol *sym2 = (*i).second;
+			output_pass(sym2, trav_full);
+		}
+
+		break;
+	}
+
+	default:
+		BUG();
+	}
+}
+
+// nsdecl callback for interfaces: emit the interface's GUID inside
+// its _ns namespace.
+void CPPFile::output_iface_ns(CPPFile *file, NameSpace *sym)
+{
+	Interface *i = dynamic_cast<Interface *>(sym);
+	assert(i);
+
+	file->output_guid(i->def.guid);
+}
+
+// Emit an interface for the requested pass:
+//   trav_nsdecl   - its _ns namespace (with GUID);
+//   trav_forward  - forward declarations of the wrapper and _i_ types;
+//   trav_obj_stub - the wrapper struct (so it can appear in structs);
+//   trav_obj_def  - the _i_ internal struct and the cast definitions;
+//   trav_full     - method definitions plus nested types/aliases.
+// Each pass first ensures the passes/dependencies it builds on.
+//
+// NOTE(review): the brace opened at "case trav_obj_def: {" is not
+// closed until after the trav_full and default labels -- legal (case
+// labels may appear inside a nested block) but easy to misread;
+// confirm this matches the Struct overload's intent.
+void CPPFile::output(Interface *sym, int pass, void *arg2)
+{
+	switch (pass) {
+	case trav_nsdecl:
+		output_nsdecl(sym, output_iface_ns);
+		break;
+
+	case trav_forward: {
+		bool nested = indent.indent_level != 0;
+
+		if (!nested)
+			ns_in(sym, "FWD_");
+		else
+			extra_newline();
+
+		file << indent << "struct " << **sym->name << ";\n"
+		     << indent << "struct _i_" << **sym->name << ";\n";
+
+		if (!nested)
+			ns_out(sym);
+
+		break;
+	}
+
+	case trav_obj_stub: {
+		output_pass(sym, trav_forward);
+
+		// Supers only need stubs here; full defs come with obj_def.
+		for (Interface::supers_iterator i = sym->supers_begin();
+		     i != sym->supers_end(); ++i)
+		{
+			Interface *super = *i;
+			output_pass(super, trav_obj_stub);
+		}
+
+		declare_dependencies(sym);
+		ns_in(sym, "STUB_");
+
+		file << indent << "struct "
+		     << get_definition_name(sym) << " {\n";
+
+		downscope();
+
+		output_wrapper(sym);
+
+		upscope();
+		file << indent << "};\n";
+
+		ns_out(sym);
+		break;
+	}
+
+	case trav_obj_def: {
+		output_pass(sym, trav_obj_stub);
+
+		for (Interface::supers_iterator i = sym->supers_begin();
+		     i != sym->supers_end(); ++i)
+		{
+			Interface *super = *i;
+			output_pass(super, trav_full);
+		}
+
+		declare_dependencies(sym);
+		ns_in(sym, "OBJDEF_");
+
+		file << indent << "struct ";
+		file << get_definition_name(sym, "_i_") << " {\n";
+
+		downscope();
+
+		output_internal(sym);
+
+		upscope();
+		file << indent << "};\n\n";
+
+		output_casts(sym);
+
+		ns_out(sym);
+		break;
+
+	case trav_full:
+		output_pass(sym, trav_obj_def);
+
+		declare_dependencies(sym, true);
+		ns_in(sym);
+
+		output_method_defs(sym);
+
+		ns_out(sym);
+		output_aliases_and_types(sym);
+
+		for (NameSpace::const_iterator i = sym->begin(); i != sym->end(); ++i) {
+			Symbol *sym2 = (*i).second;
+			output_pass(sym2, trav_full);
+		}
+
+		break;
+
+	default:
+		BUG();
+	}
+	}
+}
+
+// Emit one element of a bitfield: a nested bitfield recurses with an
+// extended prefix; enums and untyped data become a C bitfield member;
+// anything else is rejected.  Names starting with get_/set_ are
+// prefixed with '_' to keep them free for accessors.
+// NOTE(review): "pos" is currently unused -- bit placement appears to
+// be left to the C compiler's bitfield layout; confirm.
+void CPPFile::output_bf_elem(Datum *d, int pos, int bits,
+                             string &prefix)
+{
+	Type *t = d->type;
+	BitField *bf = dynamic_cast<BitField *>(t);
+	Enum *en = dynamic_cast<Enum *>(t);
+
+	string fieldname;
+
+	if (!d->name->compare(0, 4, "get_") ||
+	    !d->name->compare(0, 4, "set_"))
+		fieldname = '_';
+
+	fieldname.append(**d->name);
+
+	if (bf) {
+		string newprefix(prefix);
+		newprefix.append(**d->name);
+		newprefix.append("_IDLNS_");
+		output_bf(bf, d->def.icon, bits, newprefix);
+
+		// FIXME: getters and setters
+	} else if (en || !t) {
+		file << indent << "uint" << bits << "_t "
+		     << prefix << fieldname << ':' << d->def.icon << ";\n";
+	} else {
+		// This is checked here rather than in input.cc, because
+		// I'm lazy.
+
+		fprintf(stderr, "idlc: Bad input: \"%s\" cannot be the type of \"%s\"\n",
+		        t->get_fq_name()->flatten()->c_str(),
+		        d->get_fq_name()->flatten()->c_str());
+
+		throw UserError();
+	}
+}
+
// Emit the members of bitfield "sym".  "bits" is the width this
// (possibly nested) field occupies, "typebits" the rounded-up width
// of the underlying integer type, and "prefix" disambiguates members
// of flattened nested bitfields.
void CPPFile::output_bf(BitField *sym, int bits, int typebits,
                        string &prefix)
{
	int size = 0;

	// Either the top-level call (bits == typebits) or a nested call
	// sized by the containing datum's declared bit count.
	assert(bits == sym->def.bits || bits == typebits);

	// Total the declared widths of all entries.
	for (BitField::entries_iterator i = sym->entries_begin();
	     i != sym->entries_end(); ++i)
		size += (*i)->def.icon;

	if (size > sym->def.bits) {
		// FIXME: This isn't detected in the front end,
		// but even once it is, this should stay as a
		// replacement for input.cc checking.

		fprintf(stderr, "idlc: \"%s\" is too small (%d bits) for its "
		                "contents (%d bits)\n",
		        sym->get_fq_name()->flatten()->c_str(),
		        sym->def.bits, size);

		throw UserError();
	}

	if (target->bitfield_big_endian) {
		// Big-endian layout: emit padding first and the entries in
		// reverse declaration order.
		if (size != bits) {
			// The prefix is put at the end, so that we can avoid
			// consecutive underscores or an underscore followed
			// by a capital, both of which are reserved in C++.

			file << indent << "uint" << bits << "_t _pad_" << prefix
			     << ':' << bits - size << ";\n";
		}

		int pos = sym->def.bits;

		for (BitField::entries_reverse_iterator i = sym->entries_rbegin();
		     i != sym->entries_rend(); ++i)
		{
			Datum *d = *i;
			pos -= d->def.icon;
			output_bf_elem(d, pos, typebits, prefix);
		}
	} else {
		// Little-endian layout: declaration order, from bit 0 up.
		int pos = 0;

		for (BitField::entries_iterator i = sym->entries_begin();
		     i != sym->entries_end(); ++i)
		{
			Datum *d = *i;
			output_bf_elem(d, pos, typebits, prefix);
			pos += d->def.icon;
		}
	}
}
+
// Traversal-pass output for a bitfield: namespace declaration,
// forward union declaration, or the full union definition (a packed
// bitfield struct overlaid with a _raw integer plus conversions).
void CPPFile::output(BitField *sym, int pass, void *arg2)
{
	switch (pass) {
	case trav_nsdecl:
		output_nsdecl(sym);
		break;

	case trav_forward:
		extra_newline();
		file << indent << "union " << **sym->name << ";\n";
		break;

	case trav_full: {
		// Width of the underlying integer type.
		int bits = round_up_bits(sym->def.bits);
		ns_in(sym);

		file << indent << "union ";
		file << get_definition_name(sym) << " {\n";
		downscope();

		// Anonymous struct holding the actual bitfield members.
		file << indent << "struct {\n";

		downscope();
		string nullprefix;
		output_bf(sym, bits, bits, nullprefix);
		upscope();

		// Overlay a raw integer view plus constructors and an
		// implicit conversion, so the union behaves like an int.
		file << indent << "};\n\n"
		     << indent << "uint" << bits << "_t _raw;\n\n"
		     << indent << **sym->name << "()\n"
		     << indent << "{\n"
		     << indent << "\t_raw = 0;\n"
		     << indent << "}\n\n"
		     << indent << **sym->name << "(uint" << bits << "_t _init)\n"
		     << indent << "{\n"
		     << indent << "\t_raw = _init;\n"
		     << indent << "}\n\n"
		     << indent << "operator uint" << bits << "_t()\n"
		     << indent << "{\n"
		     << indent << "\treturn _raw;\n"
		     << indent << "}\n";

		upscope();
		file << indent << "};\n";

		ns_out(sym);
		output_aliases_and_types(sym);
		break;
	}

	default:
		BUG();
	}
}
+
+
// Traversal-pass output for an enum.  Enums are emitted as a struct
// of named uintN_t constants plus a _val member with constructors and
// an implicit conversion, rather than as a C++ enum.
void CPPFile::output(Enum *sym, int pass, void *arg2)
{
	switch (pass) {
	case trav_nsdecl:
		/* no-op */
		break;

	case trav_forward: {
		bool do_ns_out = false;

		// Only open a namespace at top level; nested enums are
		// emitted inline in their parent's definition.
		if (indent.indent_level == 0) {
			ns_in(sym);
			do_ns_out = true;
		} else {
			extra_newline();
		}

		file << indent << "struct " << **sym->name << " {\n";
		downscope();

		// One named constant per entry, sized to the enum's
		// rounded-up bit width.
		for (Enum::entries_iterator i = sym->entries_begin();
		     i != sym->entries_end(); ++i)
		{
			Datum *d = *i;

			file << indent << "static const uint"
			     << round_up_bits(sym->def.bits) << "_t "
			     << **d->name << " = " << d->def.ucon << ";\n";
		}

		file << '\n' << indent << "unsigned int _val;\n\n";

		// Default constructor, converting constructor, and
		// implicit conversion back to unsigned int.
		file << indent << **sym->name << "()\n"
		     << indent << "{\n"
		     << indent << "\t_val = 0;\n"
		     << indent << "}\n\n";

		file << indent << **sym->name << "(unsigned int val)\n"
		     << indent << "{\n"
		     << indent << "\t_val = val;\n"
		     << indent << "}\n\n";

		file << indent << "operator unsigned int()\n"
		     << indent << "{\n"
		     << indent << "\treturn _val;\n"
		     << indent << "}\n\n";

		upscope();
		file << indent << "};\n";

		if (do_ns_out)
			ns_out(sym);

		break;
	}

	case trav_full:
		// Nothing particular to do here, other than to make sure
		// that trav_forward has happened (which will always need to
		// be done here if it's not a nested type).

		output_pass(sym, trav_forward);
		break;

	default:
		BUG();
	}
}
+
+void CPPFile::output(BasicType *sym, int pass, void *arg2)
+{
+ switch (pass) {
+ case trav_nsdecl:
+ /* no-op */
+ break;
+
+ case trav_forward: {
+ bool do_ns_out = false;
+
+ if (indent.indent_level == 0) {
+ ns_in(sym);
+ do_ns_out = true;
+ } else {
+ extra_newline();
+ }
+
+ file << indent << "typedef ";
+ assert(!is_array(sym->def));
+ cpp_output_type(file, sym->def, false);
+ file << **sym->name << ";\n";
+
+ if (do_ns_out)
+ ns_out(sym);
+
+ break;
+ }
+
+ case trav_full:
+ output_pass(sym, trav_forward);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+void CPPFile::output(Alias *sym, int pass, void *arg2)
+{
+ switch (pass) {
+ case trav_nsdecl:
+ /* no-op */
+ break;
+
+ case trav_forward: {
+ bool do_ns_out = false;
+
+ if (indent.indent_level == 0) {
+ all_ns_in(sym);
+ do_ns_out = true;
+ } else {
+ extra_newline();
+ }
+
+ const String *type = sym->get_concrete_sym()->get_fq_name("_ns")
+ ->flatten("::");
+
+ file << indent << "typedef " << *type << " "
+ << **sym->name << ";\n";
+
+ if (do_ns_out)
+ all_ns_out(sym);
+
+ break;
+ }
+
+ case trav_full:
+ output_pass(sym, trav_forward);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+void CPPFile::output(TypeDef *sym, int pass, void *arg2)
+{
+ switch (pass) {
+ case trav_nsdecl:
+ case trav_forward:
+ /* no-op */
+ break;
+
+ case trav_full: {
+ output_pass(sym->get_concrete_sym(), trav_forward);
+
+ bool do_ns_out = false;
+
+ if (indent.indent_level == 0) {
+ all_ns_in(sym);
+ do_ns_out = true;
+ } else {
+ extra_newline();
+ }
+
+ const String *type = sym->get_concrete_sym()->get_fq_name("_ns")
+ ->flatten("::");
+
+ file << indent << "typedef " << *type << " "
+ << **sym->name << ";\n";
+
+ if (do_ns_out)
+ all_ns_out(sym);
+
+ break;
+ }
+
+ default:
+ BUG();
+ }
+}
+
// Emit a const datum as a "static const" C++ definition with its
// literal initializer.
void CPPFile::output(Datum *sym, int pass, void *arg2)
{
	// Only const data should reach the C++ output pass.
	assert(sym->def.flags.field.Const);

	switch (pass) {
	case trav_nsdecl:
	case trav_forward:
		/* no-op */
		break;

	case trav_full: {
		// A named type must be declared before this datum uses it.
		if (sym->type)
			declare_type_dependency(sym->type, false);

		bool do_ns_out = false;

		if (indent.indent_level == 0) {
			all_ns_in(sym);
			do_ns_out = true;
		} else {
			extra_newline();
		}

		file << indent << "static const ";

		assert(!is_array(sym->def.basictype));

		// Use the declared named type if present, else the inline
		// basic type.
		if (sym->type)
			cpp_output_type(file, sym->type, false, false);
		else
			cpp_output_type(file, sym->def.basictype, false);

		file << **sym->name << " = ";

		// Resolve to the concrete basic-type definition so the
		// initializer can be formatted to match it.
		CompiledBasicType *def;
		if (sym->type) {
			Symbol *real_type = sym->type->get_concrete_sym();
			BasicType *bt = dynamic_cast<BasicType *>(real_type);
			assert(bt);

			def = &bt->def;
		} else {
			def = &sym->def.basictype;
		}

		if (def->flags.field.Float) {
			file << sym->def.fcon;
		} else if (def->flags.field.Bool) {
			if (sym->def.ucon == 0)
				file << "false";
			else
				file << "true";
		} else {
			// Integers: hex with a U suffix when unsigned,
			// decimal otherwise; always suffix LL for width.
			if (def->flags.field.Unsigned) {
				file << "0x" << std::hex << sym->def.ucon << std::dec << 'U';
			} else {
				file << sym->def.icon;
			}

			file << "LL";
		}

		file << ";\n";

		if (do_ns_out)
			all_ns_out(sym);

		break;
	}

	default:
		BUG();
	}
}
+
+void CPPBinding::output_root(UserNameSpace *ns, const char *dir)
+{
+ delete new CPPFile(ns, dir);
+}
--- /dev/null
+// idlcomp/languages/c++/server.cc -- C++ server-side stubs
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <fstream>
+#include <errno.h>
+
+#include <idlc.h>
+#include <cdl.h>
+#include <targets.h>
+#include "c++.h"
+
+using std::ofstream;
+
// Emit the IDL_ifaceptr wrapper struct for "cla" into its mixin
// header: one interface-info slot per inheritance chain of the
// class's concrete interface, followed by a "priv" pointer back to
// the implementing class instance.
static void output_ifaceptrs(Class *cla, ostream &f)
{
	f << "// " << *cla->get_fq_name()->flatten("::") << "\n"
	     "// This is a C++ server class mixin generated by idlc.\n"
	     "// Do not modify this file directly.\n"
	     "// This file provides data and methods used by the object system\n"
	     "// to connect this class to its interface(s). Include it inside\n"
	     "// the class definition as \"public:\", and call the init_iface()\n"
	     "// method from your constructor(s).\n\n";


	Interface *conc = cla->concrete_iface;
	assert(conc->get_num_chains() > 0);

	f << "struct IDL_ifaceptr {\n\t";

	for (int i = 0; i < conc->get_num_chains(); i++) {
		Interface *iface = conc->get_chain_head(i);

		// Skip the "fake" aggregation interface on the first chain.
		if (i == 0)
			iface = *iface->supers_begin();

		cpp_output_name(f, iface, "_i_");
		f << " p" << i << ";\n\t";
	}

	f << **cla->name << " *priv;\n"
	     "\tIDL_ifaceptr();\n"
	     "};\n\n";
}
+
+static void output_classptr(Class *cla, Interface *iface, ostream &f)
+{
+ int chain = cla->concrete_iface->super_to_chain(iface);
+
+ f << "static " << **cla->name << " *classptr(";
+ cpp_output_name(f, iface);
+ f << " ref)\n"
+ "{\n"
+ "\tif (ref && static_cast<const void *>(ref._ptr->info) ==\n"
+ "\t static_cast<const void *>(&IDL_info.p" << chain << "))\n"
+ "\t{\n"
+ "\t\tuintptr_t ptr = reinterpret_cast<uintptr_t>(ref._ptr);\n"
+ "\t\tptr -= " << chain * target->pointer_size << ";\n"
+ "\t\treturn reinterpret_cast<IDL_ifaceptr *>(ref._ptr)->priv;\n"
+ "\t}\n\n"
+ "\treturn NULL;\n"
+ "}\n\n";
+}
+
// Emit an implicit conversion operator from the class to interface
// "iface", pointing the returned reference at the appropriate chain
// slot of the embedded IDL_ref wrapper.
static void output_convfunc(Class *cla, Interface *iface, ostream &f)
{
	f << "operator ";

	cpp_output_name(f, iface);

	f << "()\n"
	     "{\n"
	     "\treturn ";

	cpp_output_name(f, iface);

	// Which inheritance chain of the concrete interface holds "iface".
	int chain = cla->concrete_iface->super_to_chain(iface);

	f << "(reinterpret_cast< ";
	cpp_output_name(f, iface, "_i_");
	f << " *>(&IDL_ref.p" << chain << "));\n"
	     "}\n\n";
}
+
// Emit the IDL_info_type struct (one info_type entry per inheritance
// chain of the class's concrete interface) and the declaration of the
// static IDL_info instance.
static void output_info_type(Class *cla, ostream &f)
{
	Interface *conc = cla->concrete_iface;
	f << "struct IDL_info_type {\n";

	for (int i = 0; i < conc->get_num_chains(); i++) {
		Interface *iface = conc->get_chain_head(i);

		// Skip the "fake" aggregation interface on the first chain.
		if (i == 0)
			iface = *iface->supers_begin();

		f << '\t';
		cpp_output_name(f, iface, "_i_");
		f << "::info_type p" << i << ";\n";
	}

	f << "};\n\n"
	     "static IDL_info_type IDL_info;\n\n";
}
+
// Generate the per-class mixin header at "filename": the wrapper
// struct, the info tables, classptr()/conversion helpers, and
// init_iface().  Throws UserError if the file cannot be created.
static void output_mixin(Class *cla, string &filename)
{
	ofstream f;

	f.open(filename.c_str());
	if (!f.is_open()) {
		fprintf(stderr, "Could not create output file \"%s\": %s.\n",
		        filename.c_str(), strerror(errno));

		throw UserError();
	}

	output_ifaceptrs(cla, f);
	output_info_type(cla, f);

	f << "// The classptr method(s) can be used to get a class pointer from\n"
	     "// an interface reference. If the interface reference does\n"
	     "// not refer to an instance of the class, it returns NULL.\n\n";

	// One classptr() per implemented interface.
	for (Class::ifaces_iterator i = cla->ifaces_begin();
	     i != cla->ifaces_end(); ++i)
	{
		output_classptr(cla, *i, f);
	}

	f << "// Used by auto-generated stubs which know that the interface is of\n"
	     "// the proper class. Do not call this from user code.\n\n"
	     "static " << **cla->name << " *_classptr_internal(void *ptr)\n"
	     "{\n"
	     "\tIDL_ifaceptr *wrap = static_cast<IDL_ifaceptr *>(ptr);\n"
	     "\treturn wrap->priv;\n"
	     "}\n\n";

	f << "IDL_ifaceptr IDL_ref;\n\n";

	f << "// The implicit conversion function(s) below can be used to convert\n"
	     "// a class reference to an interface reference. Note that if you\n"
	     "// have a pointer, rather than a reference, you will need to dereference\n"
	     "// it prior to casting to the desired interface type.\n\n";

	// One conversion operator per implemented interface.
	for (Class::ifaces_iterator i = cla->ifaces_begin();
	     i != cla->ifaces_end(); ++i)
	{
		output_convfunc(cla, *i, f);
	}

	f << "// This method must be called prior to using any interface references\n"
	  << "// to this object. If a method is called through an interface pointer\n"
	  << "// before this is done, a memory fault is likely.\n\n"
	  << "void init_iface()\n"
	  << "{\n"
	  << "\tIDL_ref.priv = this;\n"
	  << "}\n";
}
+
// Bundles the arguments threaded through the for_each_super
// callbacks, which accept only a single void pointer.
struct callback_data {
	Class *cla;   // class whose wrappers are being generated
	ostream &f;   // footer output stream
};
+
// Emit one wrapper function bridging a method call made through
// interface "iface" to the class's implementation.  "mi" carries CDL
// overrides (implementation name, copied params) and may be NULL;
// "copy" selects the "_copy_" variant that calls .copy() on array
// parameters flagged for copying before delegating.
static void output_method_wrapper(Class *cla, Interface *iface,
                                  Method *meth, Class::MethodInfo *mi,
                                  bool copy, ostream &f)
{
	const String *implname_idlns, *implname_scope;

	if (mi) {
		implname_idlns = mi->implname->flatten("_IDLNS_");
		implname_scope = mi->implname->flatten("::");
	} else {
		implname_idlns = implname_scope = meth->name;
		assert(!copy);
	}

	// Continuation indent aligning parameters under the first one.
	Indent indent = { 1, implname_idlns->length() + 6 };

	f << "\tvoid ";

	if (copy)
		f << "_copy_";

	f << *implname_idlns << "(void *_this";

	for (Method::entries_iterator i = meth->entries_begin();
	     i != meth->entries_end(); ++i)
	{
		f << ",\n" << indent;
		cpp_output_one_param(f, *i, true, copy);
	}

	const String *classname = cla->get_fq_name()->flatten("::");

	f << ")\n"
	     "\t{\n";

	// For secondary chains, back _this up to the start of the
	// IDL_ifaceptr wrapper before recovering the class pointer.
	int chain = cla->concrete_iface->super_to_chain(iface);
	if (chain) {
		f << "\t\t_this = reinterpret_cast<void *>"
		     "(reinterpret_cast<uintptr_t>(_this) - "
		  << chain * target->pointer_size << ");\n";
	}

	f << "\t\t::" << *classname << " *_ptr = ::" << *classname
	  << "::_classptr_internal(_this);\n"
	     "\t\t_ptr->" << *implname_scope << '(';

	indent = (Indent){ 2, implname_scope->length() + 7 };

	for (Method::entries_iterator i = meth->entries_begin();
	     i != meth->entries_end(); ++i)
	{
		if (i != meth->entries_begin())
			f << ",\n" << indent;

		Param *p = *i;
		bool copy_this_param = false;
		if (copy) {
			Class::ParamInfo *pi = mi->get_param(p);
			if (pi && pi->copy)
				copy_this_param = true;
		}

		// FIXME: copy non-arrays

		f << **p->name;

		if (copy_this_param && p->is_array())
			f << ".copy()";
	}

	f << ");\n"
	     "\t}\n\n";
}
+
+// Should be static, but GCC won't accept it as a template parameter
+// even though the template is only used from this file (and thus
+// won't be instantiated elsewhere (unless some template consolidation
+// mechanism puts them all in one file or something, but then an
+// exception could be made for templates with static parameters,
+// as they can be placed in their original files with no chance
+// of a duplicate)).
+
+void cpp_output_iface_method_wrappers(Interface *super, void *arg)
+{
+ callback_data *data = static_cast<callback_data *>(arg);
+
+ for (Interface::methods_iterator i = super->methods_begin();
+ i != super->methods_end(); ++i)
+ {
+ Method *m = *i;
+ Class::MethodInfo *mi = data->cla->get_method(m);
+
+ output_method_wrapper(data->cla, super, m, mi, false, data->f);
+
+ if (mi && mi->copy_params)
+ output_method_wrapper(data->cla, super, m, mi, true, data->f);
+ }
+}
+
// State threaded through the info-table generation callbacks, which
// receive only a single void pointer.
struct Context {
	Class *cla;      // class being generated
	ostream &f;      // footer output stream
	Indent indent;   // current output indentation
	int chain;       // index of the chain being emitted
	Interface *cur;  // interface currently being visited
};
+
// Emit one entry of the generated downcast table: the super's GUID
// and the pointer adjustment from the current chain to the chain
// containing that super.
void cpp_output_iface_table_entry(Interface *super, void *arg)
{
	Context &ctx(*static_cast<Context *>(arg));

	ctx.f << ctx.indent << "{\n";
	ctx.indent.indent_level++;

	ctx.f << ctx.indent;
	cpp_output_name(ctx.f, super);

	ctx.f << "_ns::_guid.l,\n"
	      << ctx.indent
	      << (ctx.cla->concrete_iface->super_to_chain(super) - ctx.chain) *
	         target->pointer_size << '\n';

	ctx.indent.indent_level--;
	ctx.f << ctx.indent << "},\n";
}
+
// Emit one method-table entry per method of "super", referencing the
// wrapper functions generated into the IDL_Server_* namespace (see
// output_footer()).
void cpp_output_method_table_entries(Interface *super, void *arg)
{
	Context &ctx(*static_cast<Context *>(arg));

	for (Interface::methods_iterator i = super->methods_begin();
	     i != super->methods_end(); ++i)
	{
		Method *meth = *i;
		Class::MethodInfo *mi = ctx.cla->get_method(meth);
		const String *implname;

		// Use the CDL-specified implementation name if present.
		if (mi)
			implname = mi->implname->flatten("::");
		else
			implname = meth->name;

		ctx.f << ctx.indent << "IDL_Server_"
		      << *ctx.cla->get_fq_name()->flatten("_IDLNS_")
		      << "::" << *implname << ",\n";
	}
}
+
// Recursively emit the initializer for one chain of IDL_info,
// following the first-super chain from ctx.cur to its root.  The
// rootmost level holds the concrete pointer adjustment and a pointer
// to the concrete downcast table; every level gets its own downcast
// table and method table.
static void output_info_type_init_rec(Context &ctx)
{
	Interface *iface = ctx.cur;

	ctx.f << ctx.indent << "{ // " << *iface->get_fq_name()->flatten() << '\n';
	ctx.indent.indent_level++;

	if (!iface->supers_empty()) {
		// Recurse into the first super, which shares this chain.
		ctx.cur = *iface->supers_begin();
		output_info_type_init_rec(ctx);
	} else {
		// Concrete pointer adjustment
		ctx.f << ctx.indent << '-'
		      << ctx.chain * target->pointer_size << ",\n";

		// Pointer to concrete downcast table
		ctx.f << ctx.indent << "&IDL_info.p0.iface_table.";

		Interface *iface = *ctx.cla->concrete_iface->supers_begin();
		ctx.f << *iface->get_fq_name()->flatten("_IDLNS_") << ",\n";
	}

	// Interface table for downcasts
	ctx.f << ctx.indent << "{\n";
	ctx.indent.indent_level++;

	cpp_output_iface_table_entry(iface, &ctx);
	iface->for_each_super<cpp_output_iface_table_entry>(&ctx);

	ctx.f << ctx.indent << "NULL\n";

	ctx.indent.indent_level--;
	ctx.f << ctx.indent << "},\n";

	// Method table

	ctx.f << ctx.indent << "{\n";
	ctx.indent.indent_level++;

	cpp_output_method_table_entries(iface, &ctx);
	iface->for_each_super<cpp_output_method_table_entries>(&ctx);

	ctx.indent.indent_level--;
	ctx.f << ctx.indent << "},\n";

	ctx.indent.indent_level--;
	ctx.f << ctx.indent << "},\n";
}
+
+static void output_info_type_init(Class *cla, ostream &f)
+{
+ Indent indent = (Indent){1, 0};
+
+ // The struct name is enclosed in parentheses to disambiguate the
+ // leading :: from being a continuation of the type name.
+
+ cpp_output_name(f, cla);
+ f << "::IDL_info_type (";
+ cpp_output_name(f, cla);
+ f << "::IDL_info) =\n"
+ "{\n";
+
+ Context ctx = { cla, f };
+ ctx.indent.indent_level = 1;
+
+ for (ctx.chain = 0; ctx.chain < cla->concrete_iface->get_num_chains();
+ ctx.chain++)
+ {
+ ctx.cur = cla->concrete_iface->get_chain_head(ctx.chain);
+
+ // Skip the "fake" aggregation interface on the first chain.
+ if (ctx.chain == 0)
+ ctx.cur = *ctx.cur->supers_begin();
+
+ output_info_type_init_rec(ctx);
+ }
+
+ assert(ctx.indent.indent_level == 1);
+ f << "};\n\n";
+}
+
// Emit the per-class portion of footer.cc: the method-wrapper
// namespace, the IDL_info initializer, and the IDL_ifaceptr
// constructor that points each chain slot at its info entry.
static void output_footer(Class *cla, ostream &f)
{
	// Create an arbitrary, unique name to stick the method wrappers in.
	f << "\n\nnamespace IDL_Server_" << *cla->get_fq_name()->flatten("_IDLNS_")
	  << " {\n";

	callback_data data = { cla, f };

	cla->concrete_iface->for_each_super
		<cpp_output_iface_method_wrappers>(&data);

	f << "};\n\n";

	output_info_type_init(cla, f);

	cpp_output_name(f, cla);
	f << "::IDL_ifaceptr::IDL_ifaceptr() :";

	// Member-initializer list: p<i>(&IDL_info.p<i>) for each chain.
	for (int i = 0; i < cla->concrete_iface->get_num_chains(); i++) {
		if (i != 0)
			f << ',';

		f << "\np" << i << "(&IDL_info.p" << i << ")";
	}

	f << "\n{\n"
	     "}\n";
}
+
+void CPPBinding::output_server(UserNameSpace *ns, const char *dir)
+{
+ ofstream footer;
+
+ string footer_name(dir);
+ footer_name += "/footer.cc";
+
+ if (makedep) {
+ printf("%s: %s\n", footer_name.c_str(), dir);
+ } else {
+ footer.open(footer_name.c_str());
+ if (!footer.is_open()) {
+ fprintf(stderr, "Could not create output file \"%s\": %s.\n",
+ footer_name.c_str(), strerror(errno));
+
+ throw UserError();
+ }
+
+ footer << "// This is a C++ server binding generated by idlc.\n"
+ "// Do not modify this file directly.\n"
+ "// Include this file from exactly one file that has access to\n"
+ "// the class and interface declarations (and preferably, with\n"
+ "// access to to inlineable methods as well).";
+ }
+
+ for (list<ClassRef>::iterator i = classes.begin(); i != classes.end(); ++i) {
+ Class *cla = *i;
+
+ string filename(dir);
+ filename += '/';
+ filename += *cla->get_ns()->get_fq_name()->flatten("/");
+
+ makepath(filename.c_str(), false);
+
+ filename += '/';
+ filename += **cla->name;
+ filename += ".h";
+
+ if (makedep) {
+ printf("%s: %s\n", filename.c_str(), dir);
+ } else {
+ output_mixin(cla, filename);
+ output_footer(cla, footer);
+ }
+ }
+}
--- /dev/null
# Object files contributed by the C language binding.
OBJS += languages/c/main.o
--- /dev/null
+/* languages/c/main.cc -- C language binding
+ *
+ * Written by Scott Wood <scott@buserror.net>
+ */
+
+#include "lang.h"
+
// Placeholder C language binding.  Constructing the global instance
// below registers it on the language list so "-l C" is recognized;
// no stubs are generated yet.
class CBinding : public Language {
public:
	CBinding()
	{
		// Prepend this binding to the global singly-linked list
		// of available languages.
		name = "C";
		next = first_lang;
		first_lang = this;
	}

	// Stub-generation entry point -- intentionally empty for now.
	void output(Symbol *sym)
	{
	}
};

// Global instance; its constructor runs at static-init time.
CBinding cbinding;
--- /dev/null
+/* main.cc -- entry point for the interface compiler
+ *
+ * Written by Scott Wood <scott@buserror.net>
+ *
+ * The idlc program implements two main functions. One is to turn
+ * .idl files into binary type descriptions. The other is to turn
+ * those type descriptions into language-specific stubs (though a
+ * compiler/interpreter could also just read the binary types
+ * directly, as the ORB does).
+ *
+ * For the former function, it is necessary to do the work in multiple
+ * passes, as not all information is available the first time through due
+ * to the absence of forward declarations. The passes are as follows:
+ *
+ * 1: Parse the input text, and determine which symbols exist in
+ * which namespaces. At this point, no non-namespace-qualified
+ * symbol lookups will be done (though namespace-qualified lookups
+ * are done, such as to check for duplicate symbols).
+ *
+ * Objects will be created for each entity (Interface, Struct,
+ * Datum, Alias, etc.), but any reference to another entity will be
+ * stored as a StrList, not as an object pointer. This must be the
+ * case even if a lookup during this pass would have found
+ * something, as on the second pass it might find a "closer"
+ * matching symbol added later.
+ *
+ * The StrList will later be looked up with only the entity
+ * containing the reference as context, so namespace search rules
+ * must not depend on anything else.
+ *
+ * This is the only pass that involves actually reading and
+ * parsing the input file(s).
+ *
+ * 2: Look up the namespace names of using namespace.* statements.
+ * These are the only lookups where order of the statements in the
+ * file matters, and it alters the result of a lookup, so it must be
+ * completed before chain lookups are done.
+ *
+ * 3: Look up the symbol names recorded in pass 1 for aliases, and
+ * typedefs, and const datum initializers and types. This is done
+ * before the rest of the lookups, so there will always be a
+ * complete chain to follow in subsequent passes.
+ *
+ * This is done by calling the virtual lookup_chain() method on
+ * the root namespace, which recursively calls it on all of its
+ * contents.
+ *
+ * 4: Look up other symbol names recorded in pass 1. At this point,
+ * unresolved alias chains may still exist and must be handled,
+ * though they should be fixed during this phase so that they do not
 * appear in phase 5. Typedefs will remain fully chained, as the
+ * chain is semantically meaningful and must be preserved in the
+ * output.
+ *
+ * Const datum initializers are looked up in this pass despite being
+ * chains, since aliases aren't resolved in pass 2, and it's
+ * (slightly) easier to do the lookup in pass 3 and resolve the
+ * chain in pass 4 than to store a Symbol reference in pass 2,
 * get the concrete sym in pass 3, and still have to either
+ * concrete-ize other Datums' initializers or wait until pass 4
+ * to resolve the chain.
+ *
+ * This is done by calling the virtual lookup_misc() method on the
+ * root namespace, which recursively calls it on all of its
+ * contents. Upon receiving this call, each entity object should
+ * call lookup_sym() or lookup_type() on the various StrLists it has
+ * stored, but should not do anything with the object pointer
 * until pass 5 (unless the object was satisfactorily initialized
+ * in pass 1).
+ *
+ * 5: Perform any remaining semantic analysis, now that all references
+ * to other entities have been looked up.
+ *
+ * This is done by calling the virtual final_analysis() method on
+ * the root namespace, which recursively calls it on all of its
+ * contents.
+ *
+ * 6: Generate output.
+ * This is done by calling the virtual output() method on
+ * the root namespace, which recursively calls it on all of its
+ * contents.
+ *
+ * All circular dependencies must be contained within the set of files
+ * passed to the compiler in one invocation. Supporting circular
+ * dependencies over multiple invocations of the compiler would require
+ * explicitly running only certain passes, then compiling the other
+ * batch, and then finishing the original batch. It could be done with
+ * some pain (intermediate states would need to be serialized), but I
+ * only want to do it if there's a real need. Circular dependencies
+ * ought to be isolated fairly locally...
+ */
+
+#include <climits>
+#include <cstdio>
+#include <cstdarg>
+#include <cstring>
+#include <cstdlib>
+#include <cerrno>
+
+#include <idlc.h>
+#include <lang.h>
+#include <targets.h>
+#include <util.h>
+
// Global compiler state.

int num_err;            // error count; checked after each parse (see parse_inputs)
unsigned int enum_pos;
bool no_output;         // -n/--no-output: check for errors only
bool server_stubs;      // -r/--server: generate server stubs from CDL input
bool makedep;           // -M/--makedep: print make dependency info instead
int current_pass;
int traversal;
Con *yylval_con;        // lexer token values -- presumably set by the lexer glue
String **yylval_string;

UserNameSpaceRef toplevel, cdl_toplevel;  // root namespaces (IDL / CDL input)
UserNameSpace *output_ns;                 // namespace being compiled (-s)
NameSpaceRef cur_nspace;                  // namespace the parser is currently in
const char *cmdname, *output_dir, *cur_input_file = "<none>", *output_ns_name;

list<const char *> inputs;        // non-option command-line arguments
list<NameSpaceRef> nspace_stack;  // parser namespace nesting; must drain by EOF
Language *first_lang, *output_lang;  // registered bindings; the one chosen by -l
Interface *System_Object;
Struct *System_VStruct;
AutoReleasePool autorelease_pool;
+
// Print the command-line help text to stderr.  The option list must
// be kept in sync with shortopt() and longopt() below.
void print_usage()
{
	fprintf(stderr,
"Usage: %s [options] [<input(s)>]\n\n"
"Options are:\n"
"-d (--debug)            Print parser debugging information.\n"
"-h (--help)             Print this help screen.\n"
"-i DIR (--include=DIR)  Look in DIR for resolving external type definitions.\n"
"                        Multiple include directories can be specified, and\n"
"                        will be mounted at the --namespace specified when\n"
"                        they were created.\n"
"-l LANG (--lang=LANG)   Generate stubs for this language.\n"
"-M (--makedep)          Output dependency information for make.\n"
"-n (--no-output)        Do not produce output; only check for errors.\n"
"-o DIR (--output=DIR)   Generate output in DIR.\n"
"-r (--server)           Generate server stubs according to the input CDL files.\n"
"-s NS (--namespace=NS)  Input files constitute the given namespace.\n"
"-t TARGET (--target=T)  Generate stubs for the specified target architecture.\n"
"                        By default, the current architecture is targeted.\n"
"--show-languages        List languages for which stub generation is supported.\n"
"--show-targets          List supported target architectures.\n"
"If a language is specified, language-specific stubs for the output --namespace\n"
"will be placed in the specified output directory.  All inputs must be specified\n"
"as --includes (including all components of the output namespace and anything\n"
"referenced by them).  No non-option inputs may be specified.\n\n"
"If a language is not specified, inputs are treated as IDL source\n"
"files, and are compiled into the specified output directory, as\n"
"the specified output --namespace.\n\n",
cmdname);
}
+
+static void set_lang(const char *lang)
+{
+ if (output_lang) {
+ fprintf(stderr, "Only one language may be specified.\n");
+ throw UserError();
+ }
+
+ for (Language *l = first_lang; l; l = l->next) {
+ if (!strcasecmp(l->name, lang)) {
+ output_lang = l;
+ return;
+ }
+ }
+
+ fprintf(stderr, "Unsupported language \"%s\".\n", lang);
+ throw UserError();
+}
+
+static void set_target(const char *targ)
+{
+ if (target != &targets[0]) {
+ fprintf(stderr, "Only one target architecture may be specified.\n");
+ throw UserError();
+ }
+
+ for (int i = 1; i <= max_target; i++) {
+ if (!strcasecmp(targ, targets[i].name)) {
+ target = &targets[i];
+ return;
+ }
+ }
+
+ fprintf(stderr, "Unsupported target \"%s\".\n", targ);
+ throw UserError();
+}
+
+// Returns true if the argument is consumed.
+static bool shortopt(char c, const char *arg)
+{
+ switch (c) {
+ case 'd':
+ idl_debug = 1;
+ cdl_debug = 1;
+ break;
+
+ case 'h':
+ print_usage();
+ exit(0);
+
+ case 'i':
+ if (!arg) {
+ fprintf(stderr, "The -i option requires an argument.\n");
+ throw UserError();
+ }
+
+ UserNameSpace::declare_import(arg);
+ return true;
+
+ case 'M':
+ makedep = true;
+ break;
+
+ case 'n':
+ no_output = true;
+ break;
+
+ case 'l':
+ if (!arg) {
+ fprintf(stderr, "The -l option requires an argument.\n");
+ throw UserError();
+ }
+
+ set_lang(arg);
+ return true;
+
+ case 'o':
+ if (output_dir) {
+ fprintf(stderr, "Only one output directory may be specified.\n");
+ throw UserError();
+ }
+
+ if (!arg) {
+ fprintf(stderr, "The -o option requires an argument.\n");
+ throw UserError();
+ }
+
+ output_dir = arg;
+ return true;
+
+ case 'r':
+ server_stubs = true;
+ break;
+
+ case 's':
+ if (output_ns_name) {
+ fprintf(stderr, "Only one output namespace may be specified.\n");
+ throw UserError();
+ }
+
+ output_ns_name = arg;
+ return true;
+
+ case 't':
+ if (!arg) {
+ fprintf(stderr, "The -t option requires an argument.\n");
+ throw UserError();
+ }
+
+ set_target(arg);
+ return true;
+
+ default:
+ fprintf(stderr, "%s: unknown option %c\n", cmdname, c);
+ throw UserError();
+ }
+
+ return false;
+}
+
+// Handle one long option ("--name" or "--name=arg").  "s" is the option
+// name without dashes; "arg" is the text after '=' (or NULL if absent).
+// Options that require an argument print an error and throw UserError
+// when it is missing.  --help, --show-languages, and --show-targets
+// print and exit(0) directly.  Unknown options throw UserError.
+static void longopt(const char *s, const char *arg)
+{
+ if (!strcmp(s, "help")) {
+ print_usage();
+ exit(0);
+ } else if (!strcmp(s, "debug")) {
+ // Enables both the IDL and CDL parser debug flags.
+ idl_debug = 1;
+ cdl_debug = 1;
+ } else if (!strcmp(s, "include")) {
+ if (!arg) {
+ fprintf(stderr, "The --include option requires an argument.\n");
+ throw UserError();
+ }
+
+ UserNameSpace::declare_import(arg);
+ } else if (!strcmp(s, "makedep")) {
+ makedep = true;
+ } else if (!strcmp(s, "no-output")) {
+ no_output = true;
+ } else if (!strcmp(s, "output")) {
+ // --output and --namespace are single-use; repeats are errors.
+ if (output_dir) {
+ fprintf(stderr, "Only one output directory may be specified.\n");
+ throw UserError();
+ }
+
+ if (!arg) {
+ fprintf(stderr, "The --output option requires an argument.\n");
+ throw UserError();
+ }
+
+ output_dir = arg;
+ } else if (!strcmp(s, "namespace")) {
+ if (output_ns_name) {
+ fprintf(stderr, "Only one output namespace may be specified.\n");
+ throw UserError();
+ }
+
+ if (!arg) {
+ fprintf(stderr, "The --namespace option requires an argument.\n");
+ throw UserError();
+ }
+
+ output_ns_name = arg;
+ } else if (!strcmp(s, "language")) {
+ if (!arg) {
+ fprintf(stderr, "The --language option requires an argument.\n");
+ throw UserError();
+ }
+
+ set_lang(arg);
+ } else if (!strcmp(s, "target")) {
+ if (!arg) {
+ fprintf(stderr, "The --target option requires an argument.\n");
+ throw UserError();
+ }
+
+ set_target(arg);
+ } else if (!strcmp(s, "show-languages")) {
+ printf("Supported language bindings:\n");
+ for (Language *l = first_lang; l; l = l->next)
+ printf(" %s\n", l->name);
+ printf("\n");
+ exit(0);
+ } else if (!strcmp(s, "show-targets")) {
+ // Slot 0 is the "none" placeholder, so start at 1.
+ printf("Supported target architectures:\n");
+ for (int i = 1; i <= max_target; i++)
+ printf(" %s\n", targets[i].name);
+ printf("\n");
+ exit(0);
+ } else if (!strcmp(s, "server")) {
+ server_stubs = true;
+ } else {
+ fprintf(stderr, "%s: unknown option \"%s\"\n", cmdname, s);
+ throw UserError();
+ }
+}
+
+// Argument state shared with shortopt()/longopt(); set by run_idlc().
+static int global_argc;
+static const char **global_argv;
+static int got_dashdash;
+
+// Walk argv: "--" ends option processing, "--name[=arg]" goes to
+// longopt(), clustered short options go to shortopt() (which returns
+// true if it consumed the following argv entry as its argument), and
+// everything else is collected as an input file.
+static void process_args(void)
+{
+ int i;
+ size_t j;
+ char *s;
+
+ for (i = 1; i < global_argc; i++) {
+ if (global_argv[i][0] == '-' && global_argv[i][1] && !got_dashdash) {
+ const char *opt = global_argv[i];
+
+ if (opt[1] == '-') {
+ if (!opt[2]) {
+ got_dashdash = 1;
+ } else {
+ s = strchr(&opt[2], '=');
+
+ if (!s)
+ longopt(&opt[2], NULL);
+ else {
+ // Split "--name=arg" in place: this writes a NUL into
+ // the argv string itself, terminating the name part.
+ *s=0;
+ longopt(&opt[2], s+1);
+ }
+ }
+ } else for (j = 1; j < strlen(opt); j++) {
+ // Clustered short options; a consumed argument skips
+ // the next argv entry.
+ if (shortopt(opt[j], global_argv[i + 1]))
+ i++;
+ }
+ } else {
+ // A lone "-" (stdin) or any non-option word is an input.
+ inputs.push_back(global_argv[i]);
+ }
+ }
+}
+
+// RAII guard: closes the held stdio stream (if any) when the holder
+// leaves scope, so an exception cannot leak the FILE handle.
+struct FileHolder {
+ FILE *f;
+
+ FileHolder() : f(NULL)
+ {
+ }
+
+ ~FileHolder()
+ {
+ if (f)
+ fclose(f);
+ }
+};
+
+// Parse every collected input file with either the CDL or IDL parser.
+// "-" means stdin.  Any parse or lex error (or accumulated num_err)
+// aborts with UserError.  FileHolder closes each file even on throw.
+void parse_inputs(bool cdl)
+{
+ for (list<const char *>::iterator i = inputs.begin();
+ i != inputs.end(); ++i)
+ {
+ FileHolder fh;
+ cur_input_file = *i;
+
+ curline = 1;
+
+ if (strcmp(cur_input_file, "-")) {
+ yyin = fh.f = fopen(cur_input_file, "r");
+ if (!yyin) {
+ fprintf(stderr, "Cannot open input file \"%s\": %s\n",
+ cur_input_file, strerror(errno));
+ throw UserError();
+ }
+ } else {
+ yyin = stdin;
+ cur_input_file = "stdin";
+ }
+
+ // Each grammar has its own lexer setup and entry point.
+ if (cdl) {
+ cur_nspace = cdl_toplevel;
+ setup_cdlparse();
+
+ if (cdl_parse())
+ throw UserError();
+ } else {
+ cur_nspace = output_ns;
+ setup_idlparse();
+
+ if (idl_parse())
+ throw UserError();
+ }
+
+ if (finish_lex() || num_err)
+ throw UserError();
+ }
+
+ cur_input_file = "<none>";
+ curline = 0;
+
+ // Every namespace push must have been popped by the parser.
+ if (!nspace_stack.empty())
+ BUG();
+}
+
+// Create (or find) the top-level output namespace named by --namespace
+// and point output_ns at it.  A malformed name or a conflict with an
+// imported namespace is reported and escalated as UserError.
+static void init_output_ns()
+{
+ StrList *strl;
+
+ try {
+ String *str = new String(output_ns_name);
+ strl = new StrList(str);
+ }
+
+ catch (InvalidArgument) {
+ // Fixed typo in user-visible message ("vaild" -> "valid").
+ fprintf(stderr, "Output namespace \"%s\" is not valid.\n",
+ output_ns_name);
+ throw UserError();
+ }
+
+ cur_nspace = toplevel;
+ NameSpace *new_ns = add_nspace(strl, false);
+
+ // add_nspace() reports failures itself and returns NULL; failure at
+ // the toplevel with a validated name indicates a compiler bug.
+ if (!new_ns)
+ BUG();
+
+ NameSpace *conflict = check_for_imports(new_ns);
+ if (conflict) {
+ fprintf(stderr, "Output namespace \"%s\" conflicts"
+ " with \"%s\" at \"%s\".\n",
+ output_ns_name, conflict->get_fq_name()->flatten()->c_str(),
+ conflict->get_path()->c_str());
+
+ throw UserError();
+ }
+
+ // (The redundant second NULL check on new_ns was removed; it was
+ // already verified above.)
+ output_ns = dynamic_cast<UserNameSpace *>(new_ns);
+ if (!output_ns)
+ BUG();
+}
+
+// Resolve the well-known ..System.Object interface into System_Object.
+// The leading empty string makes the lookup path absolute.  Missing or
+// wrong-kinded symbols are reported and escalated as UserError.
+void lookup_system_object()
+{
+ try {
+ StrList *sl = new StrList;
+
+ sl->push_back(new String(""));
+ sl->push_back(new String("System"));
+ sl->push_back(new String("Object"));
+
+ Symbol *sym = lookup_sym(toplevel, sl, toplevel);
+ System_Object = dynamic_cast<Interface *>(sym);
+ }
+
+ catch (UserError) {
+ yyerrorf("Could not find System.Object.");
+ yyerrorf("Be sure to include the file or directory containing "
+ "the System namespace.");
+
+ throw UserError();
+ }
+
+ // The dynamic_cast yields NULL if the symbol is not an Interface.
+ if (!System_Object) {
+ yyerrorf("System.Object is not an interface.");
+ throw UserError();
+ }
+}
+
+// Resolve the well-known ..System.VStruct virtual struct into
+// System_VStruct, mirroring lookup_system_object() above.
+void lookup_system_vstruct()
+{
+ try {
+ StrList *sl = new StrList();
+
+ sl->push_back(new String(""));
+ sl->push_back(new String("System"));
+ sl->push_back(new String("VStruct"));
+
+ Symbol *sym = lookup_sym(toplevel, sl, toplevel);
+ System_VStruct = dynamic_cast<Struct *>(sym);
+ }
+
+ catch (UserError) {
+ yyerrorf("Could not find System.VStruct.");
+ yyerrorf("Be sure to include the file or directory containing "
+ "the System namespace.");
+
+ throw UserError();
+ }
+
+ if (!System_VStruct) {
+ yyerrorf("System.VStruct is not a struct.");
+ throw UserError();
+ }
+
+ if (!System_VStruct->is_virtual()) {
+ yyerrorf("System.VStruct is not virtual.");
+ throw UserError();
+ }
+}
+
+// Compile IDL inputs into the filesystem interface representation.
+// Validates the option combination, then runs the fixed sequence of
+// analysis passes (current_pass 1..6), aborting on any accumulated
+// errors.  The autorelease pool is drained between passes.
+void normal_output()
+{
+ if (inputs.empty()) {
+ fprintf(stderr, "No inputs given.\n");
+ print_usage();
+ throw UserError();
+ }
+
+ if (makedep) {
+ fprintf(stderr, "Dependency generation is only supported "
+ "when generating language output.\n");
+ throw UserError();
+ }
+
+ if (!no_output && !output_dir) {
+ fprintf(stderr, "An output directory must be specified "
+ "when not using -n.\n");
+ throw UserError();
+ }
+
+ if (!output_ns_name) {
+ fprintf(stderr, "An output namespace must be specified.\n");
+ throw UserError();
+ }
+
+ if (!no_output && output_dir)
+ makepath(output_dir);
+
+ init_output_ns();
+
+ // Pass 1 happens implicitly during parsing; pass numbering below
+ // gates what lookup code is allowed to do (see lookup_sym asserts).
+ current_pass = 1;
+ autorelease_pool.clean();
+
+ parse_inputs(false);
+
+ current_pass = 2;
+ autorelease_pool.clean();
+
+ lookup_system_object();
+ lookup_system_vstruct();
+ output_ns->lookup_imports();
+
+ current_pass = 3;
+ autorelease_pool.clean();
+
+ output_ns->lookup_chain();
+ if (num_err)
+ throw UserError();
+
+ current_pass = 4;
+ autorelease_pool.clean();
+
+ output_ns->lookup_misc();
+ if (num_err)
+ throw UserError();
+
+ current_pass = 5;
+ autorelease_pool.clean();
+
+ output_ns->final_analysis();
+ if (num_err)
+ throw UserError();
+
+ current_pass = 6;
+ autorelease_pool.clean();
+
+ if (!no_output)
+ output_ns->output(output_dir);
+
+ autorelease_pool.clean();
+}
+
+// Generate language-specific output (client bindings, or server stubs
+// when --server was given).  Client output takes no input files (the
+// namespaces come from --include); server stubs parse CDL inputs.
+// Fixes the "vaild" typo in the user-visible error message.
+void language_output()
+{
+ if (server_stubs) {
+ if (inputs.empty()) {
+ fprintf(stderr, "No inputs given.\n");
+ print_usage();
+ throw UserError();
+ }
+ } else {
+ if (!inputs.empty()) {
+ fprintf(stderr, "Inputs may not be specified when generating "
+ "language-specific client output.\n"
+ "Use --include to specify input namespaces.\n");
+ throw UserError();
+ }
+
+ if (!output_ns_name) {
+ fprintf(stderr, "An output namespace must be specified.\n");
+ throw UserError();
+ }
+
+ if (makedep) {
+ fprintf(stderr, "Dependency information is currently only supported "
+ "for server stubs.\n");
+ throw UserError();
+ }
+ }
+
+ if (no_output) {
+ fprintf(stderr, "-n may not be used when generating "
+ "language-specific output.\n");
+ throw UserError();
+ }
+
+ if (!output_dir) {
+ fprintf(stderr, "An output directory must be specified when "
+ "generating language-specific output.\n");
+ throw UserError();
+ }
+
+ // targets[0] is the "none" placeholder, i.e. no -t option given.
+ if (!makedep && target == &targets[0]) {
+ fprintf(stderr, "Target detection not yet implemented.\n"
+ "Please specify a target architecture.\n");
+ throw UserError();
+ }
+
+ if (!makedep)
+ makepath(output_dir);
+
+ if (server_stubs) {
+ // current_pass = INT_MAX: all analysis passes are considered done
+ // for the pre-built namespaces we are reading.
+ current_pass = INT_MAX;
+ lookup_system_object();
+ lookup_system_vstruct();
+ parse_inputs(true);
+ autorelease_pool.clean();
+ output_lang->output_server(cdl_toplevel, output_dir);
+ } else {
+ StrList *output_ns_strl;
+ try {
+ String *output_ns_str = new String(output_ns_name);
+ output_ns_strl = new StrList(output_ns_str);
+ }
+
+ catch (InvalidArgument) {
+ fprintf(stderr, "Output namespace \"%s\" is not valid.\n",
+ output_ns_name);
+ throw UserError();
+ }
+
+ cur_nspace = toplevel;
+ current_pass = INT_MAX;
+ lookup_system_object();
+ lookup_system_vstruct();
+
+ Symbol *sym = lookup_sym(toplevel, output_ns_strl, toplevel);
+ output_ns = dynamic_cast<UserNameSpace *>(sym);
+ if (!output_ns) {
+ fprintf(stderr, "Output namespace \"%s\" is not a pure namespace.\n",
+ output_ns_name);
+ throw UserError();
+ }
+
+ output_ns->import_all_recursive();
+ autorelease_pool.clean();
+ output_lang->output_root(output_ns, output_dir);
+ }
+
+ autorelease_pool.clean();
+}
+
+// Top-level driver: process arguments and dispatch to language or
+// normal output.  Internal exception types are reported as compiler
+// bugs; UserError means a message was already printed.  Returns the
+// process exit status (0 on success, 1 on any error).
+//
+// Fix: the InvalidArgument handler passed cur_input_file/curline to
+// fprintf with no matching format specifiers (undefined behavior per
+// the C standard's fprintf contract); the stray arguments are removed.
+int run_idlc(int argc, const char **argv)
+{
+ global_argc = argc;
+ global_argv = argv;
+ cmdname = argv[0];
+
+ toplevel = new UserNameSpace();
+ cdl_toplevel = new UserNameSpace();
+
+ try {
+ process_args();
+
+ if (output_lang)
+ language_output();
+ else {
+ if (server_stubs) {
+ fprintf(stderr, "The -r (--server) option may only be used "
+ "with the -l (--lang) option.\n");
+
+ throw UserError();
+ }
+
+ normal_output();
+ }
+ }
+
+ catch (InternalError &e) {
+ fprintf(stderr, "Internal idlc error at %s:%d\n",
+ e.file, e.line);
+ return 1;
+ }
+
+ catch (InvalidArgument) {
+ fprintf(stderr, "Internal idlc error: Uncaught InvalidArgument\n");
+ return 1;
+ }
+
+ catch (SymbolNotFound) {
+ fprintf(stderr, "Internal idlc error: Uncaught SymbolNotFound\n");
+ return 1;
+ }
+
+ catch (DuplicateSymbol) {
+ fprintf(stderr, "Internal idlc error: Uncaught DuplicateSymbol\n");
+ return 1;
+ }
+
+ catch (UserError) {
+ // An error message has already been displayed.
+ num_err++;
+ }
+
+ catch (...) {
+ fprintf(stderr, "%s:%d: Internal error: Uncaught Exception\n",
+ cur_input_file, curline);
+ return 1;
+ }
+
+ if (num_err)
+ return 1;
+
+ return 0;
+}
+
+int main(int argc, const char **argv)
+{
+ // Drain the autorelease pool before returning the driver's status.
+ const int status = run_idlc(argc, argv);
+ autorelease_pool.clean();
+ return status;
+}
+
+// Called by flex at end of input; returning 1 means "no further input
+// to switch to" (each file is scanned with a fresh setup).
+extern "C" int yywrap()
+{
+ return 1;
+}
+
+extern char *yytext;
+
+void idl_error(char *s)
+{
+ if (strlen(yytext))
+ fprintf(stderr, "%s:%d: %s at \"%s\".\n", cur_input_file, curline, s, yytext);
+ else
+ fprintf(stderr, "%s:%d: %s at end of input.\n", cur_input_file, curline, s);
+
+ num_err++;
+}
+
+// printf-style error reporter prefixed with the current file:line;
+// increments the global error count.
+void yyerrorf(const char *s, ...)
+{
+ va_list va;
+ va_start(va, s);
+ fprintf(stderr, "%s:%d: ", cur_input_file, curline);
+ vfprintf(stderr, s, va);
+ fprintf(stderr, "\n");
+ va_end(va);
+
+ num_err++;
+}
+
+// Like yyerrorf(), but with an explicit file/line (used when reporting
+// against a previously recorded token position rather than the scanner's
+// current position).
+void yyerrorfl(const char *file, int line, const char *s, ...)
+{
+ va_list va;
+ va_start(va, s);
+ fprintf(stderr, "%s:%d: ", file, line);
+ vfprintf(stderr, s, va);
+ fprintf(stderr, "\n");
+ va_end(va);
+
+ num_err++;
+}
+
+// Adapter: the IDL parser calls idl_lex(), which forwards to the
+// flex-generated yylex().
+int idl_lex()
+{
+ return yylex();
+}
--- /dev/null
+/* namespace.cc -- Code to maintain and search namespaces
+ *
+ * Written by Scott Wood <scott@buserror.net>
+ */
+
+#include <idlc.h>
+#include <parser.h>
+
+// Resolve each "using"-style import string recorded during parsing into
+// an actual NameSpace pointer, then recurse into all contained symbols.
+// A name that resolves to a non-namespace is a user error.
+void NameSpace::lookup_imports()
+{
+ for (list<StrListRef>::iterator i = import_strs.begin();
+ i != import_strs.end(); ++i)
+ {
+ StrList *strl = *i;
+ Symbol *sym = lookup_sym(toplevel, strl, this);
+ NameSpace *ns = dynamic_cast<NameSpace *>(sym);
+
+ if (!ns) {
+ // Report at the position of the last path component.
+ const String *str = strl->back();
+ yyerrorfl(str->file, str->line, "\"%s\" is not a namespace.",
+ sym->get_fq_name()->flatten()->c_str());
+ throw UserError();
+ }
+
+ imports.push_back(ns);
+ }
+
+ for (const_iterator i = begin(); i != end(); ++i) {
+ Symbol *sym = (*i).second;
+ sym->lookup_imports();
+ }
+}
+
+
+// Find the namespace in which "name" is declared.
+// The rules for namespace precedence are in doc/idl/namespace-precedence.
+
+NameSpace *NameSpace::search(const String *name, Symbol *exclude)
+{
+ // Rule #1: Check current namespace first.
+ // "exclude" suppresses a match on the symbol currently being
+ // declared, so it does not shadow an existing meaning of its name.
+ Symbol *sym = lookup_noex(name, true);
+ if (sym && sym != exclude)
+ return this;
+
+ // Rule #2: Check imported symbols
+ Symbol *found = NULL;
+
+ for (list<NameSpaceRef>::iterator i = imports.begin();
+ i != imports.end(); ++i)
+ {
+ NameSpace *ns = *i;
+ Symbol *newfound = ns->lookup_noex(name, true);
+
+ if (newfound) {
+ if (newfound == exclude)
+ continue;
+
+ // Two distinct imported symbols with the same name make the
+ // reference ambiguous; report both candidates.
+ if (found && found != newfound) {
+ yyerrorfl(name->file, name->line,
+ "\"%s\" is ambiguous. Two possibilities "
+ "(there may be more) are:",
+ name->c_str());
+
+ yyerrorfl(found->name->file, found->name->line, " \"%s\"",
+ found->get_fq_name()->flatten()->c_str());
+ yyerrorfl(newfound->name->file, newfound->name->line, " \"%s\"",
+ newfound->get_fq_name()->flatten()->c_str());
+
+ throw UserError();
+ }
+
+ found = newfound;
+ }
+ }
+
+ // Return the namespace that declares the match, not the match itself.
+ if (found)
+ return found->get_ns();
+
+ return NULL;
+}
+
+// Walk outward from ctx through its enclosing namespaces, returning the
+// first one in which "name" resolves (per NameSpace::search).  If no
+// enclosing scope declares the name, report the error and throw.
+static NameSpace *search_for_namespace(const String *name, NameSpace *ctx,
+ Symbol *exclude)
+{
+ for (NameSpace *scope = ctx; scope; scope = scope->get_ns()) {
+ NameSpace *hit = scope->search(name, exclude);
+
+ if (hit)
+ return hit;
+ }
+
+ yyerrorfl(name->file, name->line,
+ "Unknown symbol \"%s\" in namespace search path.",
+ name->c_str());
+
+ throw UserError();
+}
+
+// Resolve a (possibly qualified) name to a symbol.  An empty first
+// component makes the path absolute (rooted at "ns"); otherwise the
+// first component is located via the namespace search path starting at
+// "ctx".  Unknown components and non-namespace intermediates are user
+// errors.  Never returns NULL.
+Symbol *lookup_sym(NameSpace *ns, StrList *name, NameSpace *ctx,
+ Symbol *exclude)
+{
+ Symbol *sym = NULL;
+ NameSpace *top = ns;
+
+ // Name resolution is not legal during the initial parse pass.
+ assert(current_pass != 1);
+ assert(name->size() != 0);
+ assert(ctx);
+
+ StrList::iterator i = name->begin();
+
+ if (name->front()->length() != 0) {
+ ns = search_for_namespace(name->front(), ctx, exclude);
+
+ if (!ns)
+ ns = top;
+ } else {
+ // Absolute path: skip the empty leading component.
+ ++i;
+ }
+
+ assert(i != name->end());
+ bool first = true;
+
+ while (i != name->end()) {
+ try {
+ // "first" lets lookup() apply the search-path semantics only
+ // to the initial component.
+ sym = ns->lookup(*i, first)->get_concrete_sym();
+ }
+
+ catch (SymbolNotFound) {
+ yyerrorfl((*i)->file, (*i)->line,
+ "Unknown symbol \"%s\" in \"%s\".",
+ (*i)->c_str(), ns->get_fq_name()->flatten()->c_str());
+
+ throw UserError();
+ }
+
+ ++i;
+ first = false;
+
+ // Only needed when more components follow; the final symbol may
+ // be of any kind.
+ ns = dynamic_cast<NameSpace *>(sym);
+
+ if (!ns && i != name->end()) {
+ yyerrorfl((*i)->file, (*i)->line,
+ "\"%s\" is not a namespace.",
+ sym->get_fq_name()->flatten()->c_str());
+
+ throw UserError();
+ }
+ }
+
+ assert(sym);
+ return sym;
+}
+
+// Returns the namespace (new or otherwise) on success, or NULL on failure.
+// Walks (and implicitly creates) each component of "name" under
+// cur_nspace.  With "push", the previous cur_nspace is saved on
+// nspace_stack for a later pop_nspace().  Errors are reported here;
+// callers only see the NULL return.
+NameSpace *add_nspace(StrList *name, bool push)
+{
+ StrList::iterator i = name->begin();
+ NameSpace *ns = cur_nspace;
+ Symbol *sym;
+
+ // An empty first component denotes an absolute path, which is not
+ // allowed in declarations.
+ if ((*i)->length() == 0) {
+ yyerrorfl((*i)->file, (*i)->line,
+ "Namespaces cannot be declared with an absolute path.");
+ return NULL;
+ }
+
+ for (; i != name->end(); i++) {
+ if ((*i)->token == '*') {
+ yyerrorfl((*i)->file, (*i)->line,
+ "'*' is only allowed with \"using\".");
+ return NULL;
+ }
+
+ sym = ns->lookup_noex(*i, true);
+ if (!sym) {
+ UserNameSpace *new_ns;
+
+ try {
+ // Cannot throw DuplicateSymbol, but it can throw
+ // InvalidArgument due to user error (such as trying
+ // to implicitly declare a namespace inside of a
+ // struct/class/whatever.
+
+ new_ns = new UserNameSpace(*i);
+ ns->add_user(new_ns);
+ }
+
+ catch (InvalidArgument) {
+ yyerrorfl((*i)->file, (*i)->line,
+ "Cannot create namespace \"%s\" inside of a "
+ "non-namespace type or import namespace.",
+ (*i)->c_str());
+ return NULL;
+ }
+
+ ns = new_ns;
+ } else {
+ // Don't let the user declare things in non-user namespaces.
+ // Besides the headache of verifying that it's something
+ // that belongs, and of determining order for situations
+ // where it matters, it's just icky.
+
+
+ ns = dynamic_cast<UserNameSpace *>(sym);
+ if (!ns) {
+ yyerrorfl((*i)->file, (*i)->line,
+ "\"%s\" is not a namespace.",
+ sym->get_fq_name()->flatten()->c_str());
+
+ return NULL;
+ }
+ }
+ }
+
+ if (push)
+ nspace_stack.push_front(cur_nspace);
+
+ cur_nspace = ns;
+ return ns;
+}
+
+// Restore the namespace that was active before the matching push in
+// add_nspace(); stack underflow indicates a compiler bug.
+void pop_nspace()
+{
+ if (nspace_stack.empty())
+ BUG();
+
+ NameSpace *prev = nspace_stack.front();
+ nspace_stack.pop_front();
+ cur_nspace = prev;
+}
+
+// Add a user-declared symbol; a duplicate name is a user error reported
+// against the symbol's declaration site.
+void NameSpace::add_user(Symbol *sym)
+{
+ try {
+ add(sym, false);
+ }
+
+ catch (DuplicateSymbol) {
+ yyerrorfl(sym->name->file, sym->name->line,
+ "\"%s\" already exists in this %s.",
+ sym->name->c_str(), description());
+ throw UserError();
+ }
+}
+
+// Add a symbol loaded from an imported (on-disk) namespace.  A
+// duplicate here means the compiler loaded the same thing twice, hence
+// BUG() rather than a user error; InvalidArgument points at a bad
+// import file.
+void NameSpace::add_import(Symbol *sym, const char *filename)
+{
+ try {
+ add(sym, true);
+ }
+
+ catch (DuplicateSymbol) {
+ yyerrorf("\"%s\" already exists in %s.",
+ sym->name->c_str(), get_fq_name()->flatten()->c_str());
+ BUG();
+ }
+
+ catch (InvalidArgument) {
+ yyerrorf("\"%s\" caused an InvalidArgument upon add.", filename);
+ throw UserError();
+ }
+}
+
+// Unlink this symbol from its namespace on destruction.  The "dying"
+// flag skips the removal while the namespace itself is being torn
+// down.  Destructors must not throw, so SymbolNotFound is only logged.
+Symbol::~Symbol()
+{
+ if (ns && !ns->dying) {
+ try {
+ ns->del(this);
+ }
+
+ catch (SymbolNotFound) {
+ fprintf(stderr, "SymbolNotFound in Symbol::~Symbol(), cannot propagate\n");
+ }
+ }
+}
+
+// Build the fully qualified name of this symbol as a component list,
+// recursing through enclosing namespaces.  When "append" is given it is
+// appended to every component except the last (not_last is false at the
+// outermost call) and except UserNameSpace components.  Unnamed symbols
+// get a descriptive placeholder component.
+StrList *Symbol::get_fq_name(const char *append, bool not_last) const
+{
+ StrList *ret;
+ const String *s = NULL;
+
+ if (name) {
+ if (append && not_last &&
+ !dynamic_cast<const UserNameSpace *>(this))
+ {
+ String *mut_s = new String(name);
+ mut_s->append(append);
+ s = mut_s;
+ } else {
+ s = name;
+ }
+ } else if (!ns && !dynamic_cast<const UserNameSpace *>(this)) {
+ s = new String("<temporary>");
+ } else if (!ns) {
+ s = new String("<toplevel>");
+ } else {
+ s = new String("<anonymous>");
+ }
+
+ // Recurse until we hit the (unnamed) toplevel namespace.
+ if (!ns || !ns->name) {
+ ret = new StrList();
+ } else {
+ ret = ns->get_fq_name(append, true);
+ }
+
+ ret->push_back(s);
+ return ret;
+}
+
+// Walk up the enclosing-namespace chain until a UserNameSpace is
+// reached; the symbol immediately below it is the outermost type
+// containing (or equal to) this symbol.
+Symbol *Symbol::find_toplevel_type()
+{
+ Symbol *below = this;
+ Symbol *parent = get_ns();
+
+ while (!dynamic_cast<UserNameSpace *>(parent)) {
+ below = parent;
+ parent = parent->get_ns();
+ }
+
+ return below;
+}
+
+// FIXME: Use double-dot for initial delimit, but only if
+// generating IDLC-style names. More generally, it should
+// allow an arbitrary alternate global scope prefix.
+
+// Join the list's components into one newly allocated String separated
+// by "delimit"; the result inherits the file/line of the last component
+// for diagnostics.  Fix: an empty list previously called back() on an
+// empty container (undefined behavior); it now yields an empty String.
+String *StrList::flatten(const char *delimit)
+{
+ String *ret = new String();
+
+ if (empty())
+ return ret;
+
+ for (const_iterator i = begin(); i != end();) {
+ const String *str = *i;
+ ret->append(*str);
+
+ if (++i != end())
+ ret->append(delimit);
+ }
+
+ const String *str = back();
+ ret->file = str->file;
+ ret->line = str->line;
+
+ return ret;
+}
--- /dev/null
+/* output.cc -- Code to output symbols to a legacy filesystem
+ *
+ * Written by Scott Wood <scott@buserror.net>
+ */
+
+#include <cstring>
+#include <cerrno>
+#include <vector>
+#include <memory>
+#include <cctype>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include <idlc.h>
+#include <parser.h>
+
+// Map a symbol to its on-disk path under "root": collect mangled name
+// components from the symbol up to (but excluding) output_ns, then join
+// them (reversed) into root/a/b/c.  Caller owns the returned string.
+static string *map_sym(const char *root, Symbol *sym)
+{
+ typedef std::vector<StringRef> StringVec;
+ StringVec path;
+
+ while (sym && sym != output_ns && sym->name) {
+ const String *name = NameSpace::mangle(sym->name);
+ path.push_back(name);
+ sym = sym->get_ns();
+ }
+
+ return stringvec_to_path(path, root);
+}
+
+// Join components in REVERSE vector order into a '/'-separated path,
+// optionally prefixed by "prepend" (map_sym pushes leaf-first, so this
+// restores root-first order).  Caller owns the returned string.
+string *stringvec_to_path(StringVec &stringvec, const char *prepend)
+{
+ string *s = new string();
+
+ if (prepend)
+ *s += string(prepend);
+
+ for (StringVec::reverse_iterator i = stringvec.rbegin();
+ i != stringvec.rend(); ++i)
+ {
+ *s += '/';
+ *s += *i;
+ }
+
+ return s;
+}
+
+// Emit this namespace as a directory under "root", then recurse into
+// every non-private contained symbol.  An already-existing directory
+// is fine (EEXIST); any other mkdir failure is a user-visible error.
+void NameSpace::output(const char *root)
+{
+ std::auto_ptr<string> path(map_sym(root, this));
+
+ if (mkdir(path->c_str(), 0777) && errno != EEXIST) {
+ fprintf(stderr, "Cannot create directory \"%s\": %s.\n",
+ path->c_str(), strerror(errno));
+
+ throw UserError();
+ }
+
+ for (tbl_type::iterator i = tbl.begin(); i != tbl.end(); ++i) {
+ Symbol *sym = (*i).second;
+
+ if (!sym->priv)
+ sym->output(root);
+ }
+}
+
+// Write this definition's binary record: header, the "self" blob, then
+// any subclass-specific extra data via output_extra().  Namespace-like
+// symbols (dir == true) store it as "<dir>/.self"; leaf symbols use
+// their path directly.  O_EXCL makes a pre-existing file an error.
+void Def::output_self(const char *root, Symbol *sym, bool dir)
+{
+ std::auto_ptr<string> path(map_sym(root, sym));
+
+ if (dir)
+ *path += "/.self";
+
+ int fd = open(path->c_str(), O_CREAT | O_EXCL | O_WRONLY, 0666);
+ if (fd < 0) {
+ fprintf(stderr, "Cannot create file \"%s\": %s.\n",
+ path->c_str(), strerror(errno));
+
+ throw UserError();
+ }
+
+ // open() is used so that O_EXCL can be specified, but
+ // fwrite is used for buffering.
+ // NOTE(review): "File" appears to be a wrapper around FILE*; whether
+ // it closes the stream on destruction (including the error path
+ // below) is not visible here -- confirm against its definition.
+ File f = fdopen(fd, "wb");
+ if (!f) {
+ fprintf(stderr, "Cannot fdopen \"%s\": %s.\n",
+ path->c_str(), strerror(errno));
+
+ BUG();
+ }
+
+ if (fwrite(&hdr, sizeof(hdr), 1, f) != 1)
+ goto err;
+ if (fwrite(self, self_len, 1, f) != 1)
+ goto err;
+ if (!output_extra(f))
+ goto err;
+
+ return;
+
+err:
+ // NOTE(review): strerror(ferror(f)) passes the stream's error FLAG,
+ // not errno, to strerror -- the message text is likely wrong here.
+ fprintf(stderr, "Cannot write to file \"%s\": %s.\n",
+ path->c_str(), strerror(ferror(f)));
+
+ throw UserError();
+}
+
+// Serialize a list of exactly "count" symbols to f as length-prefixed,
+// NUL-terminated strings, each padded to a 4-byte boundary.  With
+// namespace_qual, the flattened fully qualified name is written instead
+// of the bare name.  A count mismatch is a compiler bug.  Returns false
+// on write failure.
+template<typename T, bool namespace_qual>
+bool output_list(list<T> &entries, int count, FILE *f)
+{
+ int index = 0;
+
+ for (typename list<T>::iterator i = entries.begin();
+ i != entries.end(); ++i)
+ {
+ StringRef s;
+
+ if (index >= count)
+ BUG();
+
+ if (namespace_qual)
+ s = (*i)->get_fq_name()->flatten();
+ else
+ s = (*i)->name;
+
+ int32_t len = s->length();
+
+ if (fwrite(&len, sizeof(len), 1, f) != 1)
+ return false;
+
+ // len + 1 includes the terminating NUL.
+ if (fwrite(s->c_str(), len + 1, 1, f) != 1)
+ return false;
+
+ // Make sure the next string's byte count is aligned on a
+ // 4-byte boundary. Len now includes the null.
+
+ len++;
+
+ int pad = ((len + 3) & ~3) - len;
+ int32_t zero = 0;
+
+ if (pad && fwrite(&zero, pad, 1, f) != 1)
+ return false;
+
+ index++;
+ }
+
+ if (index != count)
+ BUG();
+
+ return true;
+}
+
+// An enum is a directory of its entries plus a .self record.
+void Enum::output(const char *root)
+{
+ NameSpace::output(root);
+ output_self(root, this, true);
+}
+
+// Extra payload: the (unqualified) names of the enum's entries.
+bool Enum::output_extra(FILE *f)
+{
+ return output_list<DatumRef, false>(entries, def.num_entries, f);
+}
+
+// An interface is a directory of its members plus a .self record.
+void Interface::output(const char *root)
+{
+ NameSpace::output(root);
+ output_self(root, this, true);
+}
+
+// Extra payload: method names, then fully qualified super-interfaces.
+bool Interface::output_extra(FILE *f)
+{
+ return output_list<MethodRef, false>(methods, def.num_methods, f) &&
+ output_list<InterfaceRef, true>(supers, def.num_supers, f);
+}
+
+// A method is a directory of its parameters plus a .self record.
+void Method::output(const char *root)
+{
+ NameSpace::output(root);
+ output_self(root, this, true);
+}
+
+// Extra payload: the (unqualified) names of the method's parameters.
+bool Method::output_extra(FILE *f)
+{
+ return output_list<ParamRef, false>(entries, def.num_entries, f);
+}
+
+// A parameter is a leaf: just its .self-style record.
+void Param::output(const char *root)
+{
+ output_self(root, this, false);
+}
+
+// Non-basic types additionally store the fully qualified type name
+// (including its NUL terminator); basic types need no extra data.
+bool Param::output_extra(FILE *f)
+{
+ if (!basic)
+ return fwrite(type_fq_name->c_str(), def.type.length + 1, 1, f) == 1;
+ else
+ return true;
+}
+
+// A struct is a directory of its members plus a .self record.
+void Struct::output(const char *root)
+{
+ NameSpace::output(root);
+ output_self(root, this, true);
+}
+
+// Extra payload: member names, then (if present) the single fully
+// qualified superstruct name wrapped in a one-element list.
+bool Struct::output_extra(FILE *f)
+{
+ if (!output_list<DatumRef, false>(entries, def.num_entries, f))
+ return false;
+
+ if (super) {
+ list<Struct *> superlist;
+ superlist.push_back(super);
+ return output_list<Struct *, true>(superlist, 1, f);
+ }
+
+ return true;
+}
+
+// A bitfield is a directory of its fields plus a .self record.
+void BitField::output(const char *root)
+{
+ NameSpace::output(root);
+ output_self(root, this, true);
+}
+
+// Extra payload: the (unqualified) names of the bitfield's fields.
+bool BitField::output_extra(FILE *f)
+{
+ return output_list<DatumRef, false>(entries, def.num_entries, f);
+}
+
+// A datum is a leaf.  Constant data whose initializer is still a chain
+// of other constants must resolve to a concrete value before output;
+// failure to resolve is a user error (reported by the resolver).
+void Datum::output(const char *root)
+{
+ if (def.flags.field.Const && !const_init) {
+ resolve_constant_chain();
+
+ if (!const_init)
+ throw UserError();
+ }
+
+ output_self(root, this, false);
+}
+
+// Same scheme as Param: non-basic types store their fully qualified
+// type name (with NUL); basic types need no extra data.
+bool Datum::output_extra(FILE *f)
+{
+ if (!basic)
+ return fwrite(type_fq_name->c_str(), def.type.length + 1, 1, f) == 1;
+ else
+ return true;
+}
+
+// A basic type is a leaf; emitting one that never got its definition
+// completed is a compiler bug.
+void BasicType::output(const char *root)
+{
+ if (!complete)
+ BUG();
+
+ output_self(root, this, false);
+}
+
+// An alias is a leaf: just its record.
+void Alias::output(const char *root)
+{
+ output_self(root, this, false);
+}
+
+// Extra payload: the fully qualified name of the aliased symbol.
+bool Alias::output_extra(FILE *f)
+{
+ return fwrite(sym_fq_name->c_str(), def.length + 1, 1, f) == 1;
+}
+
+// A user namespace is a directory plus a .self record.
+void UserNameSpace::output(const char *root)
+{
+ NameSpace::output(root);
+ output_self(root, this, true);
+}
+
+// Extra payload: the namespace's mountpoint name (with NUL).
+bool UserNameSpace::output_extra(FILE *f)
+{
+ return fwrite(mountpoint_name->c_str(), def.length + 1, 1, f) == 1;
+}
--- /dev/null
+// Preprocessor hack: produce '<' from a macro so that BUILDDIR (passed
+// in by the build system as a macro) is expanded before the #include
+// path <BUILDDIR/idlparse.cc.h> is formed.
+#define LESSTHAN() <
+#include LESSTHAN()BUILDDIR/idlparse.cc.h>
--- /dev/null
+%{
+/* scan.lex -- scanner for the IDL compiler
+ *
+ * Written by Scott Wood <scott@buserror.net>
+ */
+
+#include <idlc.h>
+#include <parser.h>
+
+#include <string.h>
+#include <string>
+#include <limits.h>
+#include <stdlib.h>
+#include <cmath>
+#include <cerrno>
+
+int curline, comment_level=0;
+%}
+
+%x COMMENT
+%x STRING
+%x LINECOMMENT
+%%
+
+interface return TOK_IFACE;
+const return TOK_CONST;
+\( return '(';
+\) return ')';
+bool return TOK_BOOL;
+ushort return TOK_USHORT;
+uint return TOK_UINT;
+ulong return TOK_ULONG;
+short return TOK_SHORT;
+int return TOK_INT;
+long return TOK_LONG;
+fshort return TOK_FSHORT;
+flong return TOK_FLONG;
+struct return TOK_STRUCT;
+char return TOK_CHAR;
+octet return TOK_OCTET;
+\{ return '{';
+\} return '}';
+: return ':';
+\[ return '[';
+\] return ']';
+\; return ';';
+= return '=';
+\, return ',';
+\< return '<';
+\> return '>';
+\.\.\. return TOK_3DOT;
+\.\. return TOK_2DOT;
+\. return '.';
+\* return '*';
+- return '-';
+bitfield return TOK_BITFIELD;
+enum return TOK_ENUM;
+namespace return TOK_NAMESPACE;
+using return TOK_USING;
+async return TOK_ASYNC;
+out return TOK_OUT;
+inout return TOK_INOUT;
+shared return TOK_SHARED;
+push return TOK_PUSH;
+typedef return TOK_TYPEDEF;
+alias return TOK_ALIAS;
+upcast return TOK_INVALID;
+downcast return TOK_INVALID;
+ifaceref return TOK_INVALID;
+virtual return TOK_VIRTUAL;
+guid return TOK_GUID;
+inline return TOK_INLINE;
+static return TOK_STATIC;
+immutable return TOK_IMMUTABLE;
+true return TOK_TRUE;
+false return TOK_FALSE;
+
+name return TOK_NAME; // CDL tokens
+method return TOK_METHOD;
+class return TOK_CLASS;
+copy return TOK_COPY;
+
+
+<*>\r // Put up with DOS files
+
+[[:alpha:]][[:alnum:]_]* {
+ // Identifier rule: reserved spellings are diagnosed but the token is
+ // still returned as TOK_IDENT so parsing can continue.
+ // Leading underscores and digits are prohibited by the regex.
+ if (yytext[yyleng - 1] == '_') {
+ yyerrorf("Identifier \"%s\" has a trailing underscore.",
+ yytext);
+ } else if (strstr(yytext, "__")) {
+ yyerrorf("Identifier \"%s\" has contiguous underscores.",
+ yytext);
+ } else if (!strncmp(yytext, "IDL_", 4)) {
+ yyerrorf("Identifier \"%s\" begins with the reserved "
+ "\"IDL_\" prefix.", yytext);
+ } else if (strstr(yytext, "IDLNS")) {
+ yyerrorf("Identifier \"%s\" contains the reserved sequence "
+ "\"IDLNS\".", yytext);
+ } else if (yyleng >= 3 && !strcmp(yytext + yyleng - 3, "_ns")) {
+ // The yyleng guard is required: for 1- or 2-character
+ // identifiers the old code compared against bytes BEFORE the
+ // token in the scan buffer.
+ yyerrorf("Identifier \"%s\" ends with the reserved suffix \"_ns\".", yytext);
+ }
+
+ *yylval_string = new String(yytext, cur_input_file, curline, TOK_IDENT);
+ return TOK_IDENT;
+}
+
+
+0[0-9]+ {
+ // Octal constant.  The pattern also matches 8/9 so that a bad digit
+ // can be diagnosed here instead of mis-lexing as two tokens.
+ errno = 0;
+
+ if (strchr(yytext, '8') || strchr(yytext, '9')) {
+ yyerrorf("Invalid digit in octal constant %s.", yytext);
+ yylval_con->type = TOK_INVALID;
+ yylval_con->con.ucon = 0;
+ return TOK_INVALID;
+ }
+
+ yylval_con->con.ucon = strtoull(yytext, NULL, 8);
+
+ if (errno == ERANGE) {
+ yyerrorf("Constant %s is out of range.", yytext);
+ yylval_con->type = TOK_INVALID;
+ yylval_con->con.ucon = 0;
+ } else if (yylval_con->con.ucon > LLONG_MAX) {
+ // Values above LLONG_MAX only fit in the unsigned constant type.
+ yylval_con->type = TOK_UCON;
+ } else {
+ yylval_con->type = TOK_ICON;
+ }
+
+ return yylval_con->type;
+}
+
+0x[0-9a-fA-F]+ {
+ // Hexadecimal constant.  The character class now accepts uppercase
+ // digits too (previously "0xFF" failed to lex as a constant).
+ errno = 0;
+ yylval_con->con.ucon = strtoull(yytext, NULL, 16);
+
+ if (errno == ERANGE) {
+ yyerrorf("Constant %s is out of range.", yytext);
+ yylval_con->type = TOK_INVALID;
+ yylval_con->con.ucon = 0;
+ } else if (yylval_con->con.ucon > LLONG_MAX) {
+ yylval_con->type = TOK_UCON;
+ } else {
+ yylval_con->type = TOK_ICON;
+ }
+
+ return yylval_con->type;
+}
+
+[0-9]+ {
+ // Decimal constant; out-of-range values become TOK_INVALID, values
+ // above LLONG_MAX become unsigned constants.
+ errno = 0;
+ yylval_con->con.ucon = strtoull(yytext, NULL, 10);
+
+ if (errno == ERANGE) {
+ yyerrorf("Constant %s is out of range.", yytext);
+ yylval_con->type = TOK_INVALID;
+ yylval_con->con.ucon = 0;
+ } else if (yylval_con->con.ucon > LLONG_MAX) {
+ yylval_con->type = TOK_UCON;
+ } else {
+ yylval_con->type = TOK_ICON;
+ }
+
+ return yylval_con->type;
+}
+
+[0-9]*\.[0-9]+([eE][+-]?[0-9]+)? {
+ // Floating-point constant.  The exponent sign is now optional
+ // (previously "1.5e10" matched only "1.5", leaving "e10" to be
+ // lexed as an identifier).
+ errno = 0;
+ yylval_con->con.fcon = strtod(yytext, NULL);
+
+ if (errno == ERANGE) {
+ yyerrorf("Constant %s is out of range.", yytext);
+ yylval_con->con.fcon = 0;
+ yylval_con->type = TOK_INVALID;
+ } else {
+ yylval_con->type = TOK_FCON;
+ }
+
+ return yylval_con->type;
+}
+
+
+
+\" {
+ BEGIN(STRING);
+ *yylval_string = new String("", cur_input_file, curline, TOK_STR);
+}
+
+<STRING>{
+ \" {
+ BEGIN(INITIAL);
+ return TOK_STR;
+ }
+
+ \\\" **yylval_string += '"';
+ \\\n **yylval_string += '\n';
+ \\\\ **yylval_string += '\\';
+ \\. {
+ yyerrorf("Unknown escape sequence '\\%c'.", yytext[1]);
+ **yylval_string += yytext[1];
+ }
+ \n {
+ yyerrorf("Unterminated string literal at line %d.", curline);
+ BEGIN(INITIAL);
+ curline++;
+ }
+ [^\\\"\n]+ **yylval_string += yytext;
+}
+
+"/*" {
+ comment_level = 1;
+ BEGIN(COMMENT);
+}
+
+<COMMENT>{
+ "/*" comment_level++;
+ "*/" if (!(--comment_level)) BEGIN(INITIAL);
+ \n curline++;
+ \*+[^/*\n]*
+ \/+[^/*\n]*
+ [^/*\n]+
+}
+
+"//" BEGIN(LINECOMMENT);
+<LINECOMMENT>\n {
+ BEGIN(INITIAL);
+ curline++;
+}
+<LINECOMMENT>[^\n]+
+
+[[:blank:]]+ /* Whitespace */
+
+
+\n curline++;
+
+<<EOF>> {
+ if (comment_level) {
+ yyerrorf("End of file within comment.");
+
+ // Keep message from occurring twice if parse is OK
+ comment_level = 0;
+ }
+
+ return 0;
+}
+
+. {
+ if (isprint(yytext[0]))
+ yyerrorf("Unexpected character '%c'.", yytext[0]);
+ else
+ yyerrorf("Unexpected character #%d.", yytext[0]);
+}
+
+%%
+
+// Called after each parse; reports an unterminated comment left open at
+// end of file.  Always returns 0 (the caller also checks num_err).
+int finish_lex()
+{
+ if (comment_level)
+ yyerrorf("End of file within comment.");
+
+ return 0;
+}
--- /dev/null
+#include <targets.h>
+
+// Currently selected target; defaults to the "none" placeholder until
+// -t/--target picks a real entry (see language_output's check).
+Target *target = &targets[0];
+
+// Table indexed by the target_* enum in targets.h; entry 0 is a dummy.
+// NOTE(review): the "field: value" initializer syntax is the old GNU C
+// extension (pre-C99/C++20 designated initializers) -- GCC-specific.
+// Fields omitted from the dummy entry are zero-initialized.
+Target targets[] = {
+ // dummy
+ {
+ "none"
+ },
+
+ // x86
+ {
+ "x86",
+ pointer_size: 4,
+ big_endian: false,
+ bitfield_big_endian: false
+ },
+
+ // x64
+ {
+ "x64",
+ pointer_size: 8,
+ big_endian: false,
+ bitfield_big_endian: false
+ },
+
+ // ppc
+ {
+ "ppc",
+ pointer_size: 4,
+ big_endian: true,
+ bitfield_big_endian: true
+ },
+};
--- /dev/null
+/* targets.h -- target architecture definitions
+ *
+ * Written by Scott Wood <scott@buserror.net>
+ */
+
+#ifndef IDLC_TARGETS_H
+#define IDLC_TARGETS_H
+
+// Indices into the targets[] table below; target_none is the dummy
+// entry used when no -t option has been given.
+enum {
+ target_none, target_x86, target_x64, target_ppc
+};
+
+// Per-architecture properties consulted by the code generators.
+struct Target {
+ const char *name;
+ int pointer_size;
+ bool big_endian;
+ bool bitfield_big_endian;
+};
+
+// Highest valid index in targets[] (the table has max_target + 1
+// entries including the dummy).
+#define max_target 3
+
+extern Target targets[max_target + 1];
+extern Target *target;
+
+#endif
--- /dev/null
+# Build the IDL interface metadata and C++ bindings with idlc, then
+# compile the sample client/server programs against them.  IDLC must be
+# provided by the caller (path to the idlc binary).
+all: system test
+
+# None of these targets produce a file of the same name; declaring them
+# .PHONY keeps a stray file called "clean"/"test" from masking them.
+# (Previously only "system" was declared.)
+.PHONY: all system test clean
+system: clean
+ rm -rf include/c++ server/c++ ifaces
+ $(IDLC) -o ifaces/system `find ../../idl -name '*.idl'` -s System
+ $(IDLC) -o include/c++ -l c++ -s System -i ifaces/system -t ppc
+ $(IDLC) -o server/c++ -l c++ -r -s System -i ifaces/system -t ppc cpp-server.cdl
+
+test: system
+ g++ -Wall -c -g3 -O3 ../../lib/c++/orb.cc -o orb.o -I include/c++ -I ../../include/c++
+ g++ -Wall -g3 -O3 cpp-caller.cc orb.o -o cpp-caller -I include/c++ -I ../../include/c++
+ g++ -Wall -g3 -O3 cpp-server.cc orb.o -o cpp-server -I include/c++ -I ../../include/c++
+ g++ -Wall -g3 -O3 cpp-vstruct.cc orb.o -o cpp-vstruct -I include/c++ -I ../../include/c++
+
+clean:
+ rm -rf include/c++ server/c++ ifaces
+ rm -f cpp-caller cpp-server cpp-vstruct *.o .gdb_history
--- /dev/null
+ typedef d b;
+
+ struct d {
+ int f;
+ };
+
+ struct a {
+ b c;
+ };
--- /dev/null
+typedef int b;
+
+namespace blah {
+ struct c : b {
+ };
+
+ struct b : a {
+ };
+
+ struct a {
+ };
+}
+
--- /dev/null
+struct a {
+ struct b virtual {
+ guid: "F92F8C35-E121-11DA-B924-000A95BB581A";
+ };
+};
--- /dev/null
+#include <stdio.h>
+#include <System.h>
+
+using namespace System;
+using RunTime::Array;
+
+// Smoke test for the generated C++ client bindings: prints object
+// reference sizes, exercises an implicit upcast plus downcast(), and
+// makes read/write calls through the IOStream wrapper.
+int main()
+{
+ IO::FileStream f;
+ IO::IStream i;
+ IO::OStream o;
+ IO::IOStream io;
+
+ printf("sizeof(*FileStream) == %lu\n", sizeof(*f));
+ printf("sizeof(*IOStream) == %lu\n", sizeof(*io));
+ printf("sizeof(*IStream) == %lu\n", sizeof(*i));
+
+ printf("sizeof(FileStream) == %lu\n", sizeof(f));
+ printf("sizeof(IOStream) == %lu\n", sizeof(io));
+ printf("sizeof(IStream) == %lu\n", sizeof(i));
+
+ // Upcast is implicit; downcast goes through the generated helper.
+ i = f;
+ io = IO::IOStream::downcast(i);
+
+ if (!io)
+ printf("io is null\n");
+ else {
+ uint8_t buf[256];
+ Array<uint8_t> abuf(buf, 256);
+ uint64_t len = 64;
+
+ io.read(&abuf, &len);
+ io.write(Array<uint8_t>(buf, 128), &len);
+ }
+
+ return 0;
+}
+
+// Single method call through a FileStream reference (code-size probe).
+uint64_t call1(IO::FileStream fs)
+{
+ uint64_t size;
+ fs.size(&size);
+ return size;
+}
+
+// Call with an enum argument from the generated FileStream_ns scope.
+void call2(IO::FileStream fs)
+{
+ fs.seek(666, IO::FileStream_ns::SeekType::FromCurrent);
+}
+
+// Single call on a different interface (Mem::Cache).
+uint64_t call3(Mem::Cache c)
+{
+ uint64_t size;
+ c.get_size(&size);
+ return size;
+}
+
+// Two consecutive calls on the same reference.
+uint64_t call4(Mem::Cache c)
+{
+ uint64_t size;
+ c.get_size(&size);
+ c.get_size(&size);
+ return size;
+}
+
+
+// Calls through a Mappable obtained by upcasting, with reassignment
+// between the calls.
+uint64_t call5(Mem::Cache c)
+{
+ uint64_t size;
+ Mem::Mappable m = c;
+ m.get_size(&size);
+ m = c;
+ m.get_size(&size);
+ return size;
+}
+
+// Same as call5 but without the intervening reassignment.
+uint64_t call6(Mem::Cache c)
+{
+ uint64_t size;
+ Mem::Mappable m = c;
+ m.get_size(&size);
+ m.get_size(&size);
+ return size;
+}
+
+//const unsigned long System::IO::_i_IOStream::guid[16] = {};
--- /dev/null
+#include <stdio.h>
+#include <System.h>
+
+using System::IO::File;
+using namespace System::IO::FileStream_ns;
+using System::RunTime::Array;
+using System::RunTime::MutableArray;
+
+
+// Sample server implementation of the System.IO.FileStream interface,
+// backed by a File reference plus a seek position.  The generated
+// header included below supplies the dispatch glue (init_iface(),
+// classptr(), etc. -- see the generated server/c++ output).
+namespace Stuff {
+ using System::Notifiers::Notifier;
+
+class FileStream {
+ File f;
+ uint64_t pos;
+
+public:
+#include "server/c++/Stuff/FileStream.h"
+//#include "sample-cpp-server-decl.h"
+
+ FileStream()
+ {
+ init_iface();
+ }
+
+ // Exercises upcast/downcast on *this before storing the new file.
+ void set_file(File F)
+ {
+ using System::IO;
+ printf("this is %p\n", this);
+ IO::FileStream fs = *this;
+ IO::IOStream ostr = fs;
+ IO::FileStream fs2 = IO::FileStream::downcast(ostr);
+ printf("classptr is %p/%p\n", classptr(fs), classptr(fs2));
+
+ if (!F)
+ printf("File is NULL\n");
+ else
+ printf("File is not NULL\n");
+
+ f = F;
+ }
+
+ void get_file(File *F)
+ {
+ *F = f;
+ }
+
+ // Adjusts pos relative to start, current position, or file end.
+ void seek(int64_t offset, SeekType type)
+ {
+ if (!f) {
+ // FIXME: Add a constructor to structs
+ System::Exceptions::Std::InvalidState err;
+ throw err;
+ }
+
+ switch (type) {
+ case SeekType::FromBeginning:
+ pos = offset;
+ break;
+
+ case SeekType::FromCurrent:
+ pos += offset;
+ break;
+
+ case SeekType::FromEnd:
+ f.size(&pos);
+ pos += offset;
+ break;
+ }
+ }
+
+ void get_pos(uint64_t *POS)
+ {
+ *POS = pos;
+ }
+
+ void size(uint64_t *size)
+ {
+ f.size(size);
+ }
+
+ // Synchronous read at pos; advances pos by the bytes actually read.
+ void read(Array<uint8_t> *buf, uint64_t *len)
+ {
+ if (!f) {
+ // throw exc
+ return;
+ }
+
+ f.read(pos, buf, len);
+ pos += *len;
+ }
+
+ // Async read; pos is advanced by the requested length immediately.
+ void read_foo(MutableArray<uint8_t> buf, uint64_t len, Notifier n)
+ {
+ if (!f) {
+ // throw exc
+ return;
+ }
+
+ f.read_async(pos, buf, len, n);
+ pos += len;
+ }
+
+ void write(Array<uint8_t> buf, uint64_t *len)
+ {
+ if (!f) {
+ // throw exc
+ return;
+ }
+
+ f.write(pos, buf, len);
+ pos += *len;
+ }
+
+ void write_foo(Array<uint8_t> buf, uint64_t len, Notifier n)
+ {
+ printf("write_foo, this %p\n", this);
+
+ if (!f) {
+ // throw exc
+ return;
+ }
+
+ f.write_async(pos, buf, len, n);
+ pos += len;
+ }
+
+ void new_timer(::System::Time::Timer *timer)
+ {
+ }
+};
+}
+
+//#include "sample-cpp-server-footer.cc"
+#include "server/c++/footer.cc"
+
+#if 0
+const unsigned long System::IO::_i_IOStream::guid[4] = { 1 };
+const unsigned long System::IO::_i_IStream::guid[4] = { 2 };
+const unsigned long System::IO::_i_OStream::guid[4] = { 3 };
+const unsigned long System::IO::_i_FileStream::guid[4] = { 4 };
+const unsigned long System::_i_Object::guid[4] = { 5 };
+#endif
+
+// Test driver: wraps a Stuff::FileStream implementation in the generated
+// client-side wrappers and exercises conversions, downcasts, and async
+// stub calls.  The exact sequence of calls is what is under test.
+int main()
+{
+	Stuff::FileStream *fsimp = new Stuff::FileStream();
+	System::IO::FileStream fs = *fsimp;
+
+	if (!fs)
+		printf("fs is NULL\n");
+	else {
+		printf("fs is not NULL\n");
+		fs.set_file(NULL);
+	}
+
+	printf("fs is %p\n", (void *)fs);
+	fs.write_async(Array<uint8_t>(NULL, 0), 0, NULL);
+	printf("fs is %p\n", (void *)fs);
+	System::IO::OStream ostr = fs;
+	printf("ostr is %p\n", (void *)ostr);
+	ostr.write_async(Array<uint8_t>(NULL, 0), 0, NULL);
+	System::Object obj = fs;
+	// Downcasts from the root Object wrapper back to derived interfaces;
+	// presumably both recover non-NULL pointers here — confirm against
+	// the printed output.
+	System::IO::FileStream fs2 = System::IO::FileStream::downcast(obj);
+	System::IO::OStream os2 = System::IO::OStream::downcast(obj);
+	printf("obj %p fs2 %p os2 %p\n", (void *)obj, (void *)fs2, (void *)os2);
+
+	return 0;
+}
--- /dev/null
+// IDL server-binding description for the sample C++ server: maps the
+// Stuff.FileStream implementation class onto the interfaces it exports
+// and renames the async methods to their implementation names.
+class Stuff.FileStream : System.IO.FileStream, System.Time.TimerFactory, System.IO.IStream
+{
+	// IDLC will expect a method in FileStream (or its parents) for every
+	// method in System.IO.FileStream and its parents.  If any methods are
+	// named differently than in the caller, that has to be indicated here
+	// with a method block.  If there is a name conflict between parents,
+	// all but one *must* be overridden with a non-conflicting name (and
+	// callers will need to cast to the appropriate base interface).
+
+	method System.IO.IStream.read_async name read_foo;
+
+	method System.IO.OStream.write_async {
+		name write_foo;
+
+		// If any parameters are to have "copy" semantics, this must be
+		// specified here (a comma-delimited list may be used).
+
+		copy buf;
+	}
+}
--- /dev/null
+#include <stdio.h>
+#include <System.h>
+
+using System::Exceptions::Exception;
+using namespace System::Exceptions::Std;
+
+System::VStruct *foo;
+
+// Exception-hierarchy cast test: upcasts are plain pointer assignments,
+// downcasts should yield NULL when the concrete type does not match.
+// The expected ptr/NULL pattern for each pair is spelled out in the
+// printf strings below.
+int main()
+{
+	Exception *e, *e2;
+	SystemException *se, *se2;
+
+	MemoryFault mf;
+	InstrFault inf;
+	ArchInstrFault aif;
+
+	// Upcasts can be done directly.
+	se = &mf;
+
+	// Downcast not needed, but should work anyway
+	e = Exception::downcast(&inf);
+
+	MemoryFault *mf2 = MemoryFault::downcast(se),
+	            *mf3 = MemoryFault::downcast(e);
+	InstrFault *inf2 = InstrFault::downcast(se),
+	           *inf3 = InstrFault::downcast(e);
+	OutOfMemory *oom = OutOfMemory::downcast(e),
+	            *oom2 = OutOfMemory::downcast(&aif);
+
+	e2 = Exception::downcast(&aif);
+	// foo is a NULL System::VStruct*; downcast from NULL must be NULL.
+	se2 = SystemException::downcast(foo);
+
+	printf("Exception: %p %p (should be: ptr NULL)\n", e, e2);
+	printf("SystemException: %p %p (should be: ptr NULL)\n", se, se2);
+	printf("MemoryFault: %p %p (should be: ptr NULL)\n", mf2, mf3);
+	printf("InstrFault: %p %p (should be: NULL ptr)\n", inf2, inf3);
+	printf("OutOfMemory: %p %p (should be: NULL NULL)\n", oom, oom2);
+
+	return 0;
+}
--- /dev/null
+// Parser test fixture: namespaces, nested structs, typedef vs. alias
+// chains, constant chains, array bounds (including fully-qualified and
+// apparently-malformed forms such as "5.....test"), async methods, and
+// keyword-ish identifiers ("async", "out").  Some constructs look like
+// deliberate error cases — confirm against the test harness.
+namespace blah.aoeu.baz {
+}
+
+struct foo {
+	struct bar {
+	};
+
+	bar foo;
+
+	struct baz {
+	};
+
+	baz[...7] foo2;
+	typedef int bt1;
+	alias int bt2;
+	alias bt1 bt3;
+	typedef bt3 bt4;
+	alias flong bt5;
+	typedef bt5 bt6;
+	const bt1 v = z;
+	const bt2 w = 4;
+	const bt3 x = v;
+	const bt4 y = 6;
+	const int z = 8;
+	const flong f1 = -8988;
+	const bt6 f2 = 3.14159;
+	int[5.....test.foo.z] arr;
+};
+
+interface htns {
+	guid: "18822205-18C8-11DA-A6A1-000A95BB581A";
+
+	a(..test.foo.baz p);
+	b() async;
+	async(foo[1...4] out out shared shared, int a) async;
+};
+
+interface htns2 : htns {
+	guid: "1B77F182-18C8-11DA-BD70-000A95BB581A";
+
+	b() async;
+	a(..test.foo.baz p) async;
+	async() async;
+};
+
+bitfield b {
+	a, b:3, c
+};
+
+struct aoeu.htns {
+//	foo.baz d;
+	System.Exceptions.Exception foo;
+//	int foo;
+
+	..test.foo bar;
+	System.Objects.ID baz;
+
+	enum {
+	} a, b, c;
+
+	const bool cbf = false;
+	const bool cbt = true;
+	const bool cbi = cbt;
+};
--- /dev/null
+// Parser test fixture: like its sibling, but rooted with its own Object
+// interface and a virtual struct; also exercises "using foo3.*"
+// inheritance-plus-import and duplicate "async async" modifiers
+// (presumably an error case — confirm against the harness).
+interface Object
+{
+	guid: "9D7AFDD8-FD84-11D9-8D5B-000A95BB581A";
+};
+
+namespace blah.aoeu.baz {
+}
+
+struct foo3 {
+	struct aoeuhtns {
+	};
+};
+
+struct foo : foo3 {
+	using foo3.*;
+
+	struct bar {
+	};
+
+	bar foo;
+
+	struct baz {
+	};
+
+	aoeuhtns aoeu;
+
+	baz[...7] foo2;
+	typedef int bt1;
+	alias int bt2;
+	alias bt1 bt3;
+	typedef bt3 bt4;
+	alias flong bt5;
+	typedef bt5 bt6;
+	const int v = z;
+	const bt2 w = 4;
+	const bt3 x = v;
+	const bt4 y = 6;
+	const int z = 93;
+	const flong f1 = -8988;
+	const bt6 f2 = 3.14159;
+	int[5...z] arr;
+};
+
+interface htns {
+	guid: "A10BEB64-FD84-11D9-909B-000A95BB581A";
+	a(System.foo.baz p);
+	b() async;
+	async(foo[1...4] async shared out, int a) async;
+};
+
+interface htns2 : htns {
+	guid: "A3759710-FD84-11D9-A11A-000A95BB581A";
+	b() async;
+	a(System.foo.baz p) async;
+	async() async async;
+};
+
+bitfield b {
+	a, b:3, c
+};
+
+struct aoeu.htns {
+//	foo.baz d;
+//	int foo;
+
+	enum {
+	} a, b, c;
+};
+
+struct VStruct virtual {
+	guid: "E0C0D164-3102-11DA-B067-00112431A05E";
+};
--- /dev/null
+// Name-lookup test fixture: exercises namespace imports ("using") and
+// the symbol-lookup rules referenced as "rule #1"/"rule #2" in the
+// comments below, including specific-symbol imports and ambiguity
+// (error) cases, which are left commented out.
+interface Object {
+	guid: "4E9538F2-03E0-11DA-A88E-000A95BB581A";
+};
+
+namespace A {
+	struct D {
+	};
+
+	struct F {
+	};
+
+	// A.G is declared as a namespace; no attempt is made to look for
+	// a global G.
+	struct G.Q {
+	};
+}
+
+struct B {
+	struct C {
+		struct X {
+		};
+	};
+
+	struct G {
+	};
+
+	struct H {
+	};
+
+	struct I {
+	};
+};
+
+struct D {
+	// Imports B and B.C, not B and C.  If it were "using C.*, B.*",
+	// then it would import C and C.B.
+
+	using B.*, C.*;
+	using System.B.I;
+
+	struct E {
+//		B b;	// Error, Uses C.B, rule #2 once D is reached.  Only the
+			// contents of namespaces are imported, not the
+			// names themselves.
+//		C.D cd;	// Error.  For the same reason as above, C refers
+			// to B.C, not C.  There is no D in B.C.
+		C.X cx;	// Uses B.C.X.
+		D d;	// Uses C.D, rule #2 when D is reached.  D itself
+			// is declared in the global namespace, which doesn't
+			// get reached because a match is found sooner.
+		E e;	// Uses D.E, rule #1 once D is reached.  C.E would
+			// have been matched by rule #2 if D.E did not exist.
+		F f;	// Uses C.F, rule #2 once D is reached.  A.F is not
+			// matched, as importation is not transitive.
+		G g;	// Uses B.G, rule #2
+//		H h;	// Error, both B.H and C.H match on rule #2 once
+			// D is reached.  B does not get precedence for
+			// coming first, or because the C importation
+			// comes after this lookup.
+		I i;	// Uses B.I, as specific-symbol imports are treated
+			// as symbols declared in this namespace, rather than
+			// imports (and thus B.I has precedence over C.I).
+	};
+
+//	int I;	// Error; this conflicts with the specific-symbol
+		// importation of B.I.
+
+	// Now C is imported.  This importation is valid through the
+	// entire namespace (except for prior using statements), not just
+	// after this point.
+
+	using System.C.*;
+};
+
+namespace C {
+	using A.*;
+
+	struct D {
+	};
+
+	struct E {
+	};
+
+	struct F {
+	};
+
+	struct H {
+	};
+
+	struct I {
+	};
+
+	struct B.E {
+		struct F {
+		};
+
+		struct E {
+		};
+
+//		B b;	// Error: Uses C.B, rule #1 once C is reached,
+			// but C.B is not a type
+		D d;	// Uses C.D, rule #1 once C is reached
+		B.E be;	// Uses C.B.E, rule #1 once C is reached
+		E e;	// Uses C.B.E.E, rule #1 in current namespace
+//		E.E ee;	// Error, as the first E matches C.B.E.E, and
+			// there is no C.B.E.E.E.  The extra .E is
+			// not used for disambiguation.
+		F f;	// Uses C.B.E.F, rule #1 in current namespace
+//		G g;	// Error: Uses A.G, rule #2 once namespace C is reached.
+		H h;	// Uses C.H, rule #1 once namespace C is reached.
+	};
+}
--- /dev/null
+/* types.cc -- Semantic actions for types and namespaces.
+ *
+ * Written by Scott Wood <scott@buserror.net>
+ */
+
+#include <cfloat>
+#include <sstream>
+
+#include <idlc.h>
+#include <parser.h>
+#include <lang.h>
+
+// Validate (and, where needed, convert) this datum's constant
+// initializer against its compiled basic type.  Returns true and sets
+// const_init on success; emits a diagnostic and returns false on a
+// type or range mismatch.
+bool Datum::verify_const()
+{
+	assert(def.flags.field.Const);
+
+	// TOK_INVALID should have caused an abort in an earlier pass, and
+	// TOK_DCON should have been replaced by the final constant by now.
+
+	assert(con_type == TOK_ICON || con_type == TOK_UCON ||
+	       con_type == TOK_FCON || con_type == TOK_BOOL);
+
+	if (cbt->flags.field.Float) {
+		// Integer constants are silently converted to floating point;
+		// boolean constants are rejected.
+		if (con_type == TOK_UCON)
+			def.fcon = static_cast<double>(def.ucon);
+		else if (con_type == TOK_ICON)
+			def.fcon = static_cast<double>(def.icon);
+		else if (con_type == TOK_BOOL) {
+			yyerrorfl(name->file, name->line,
+			          "%s is of floating-point type, but is initialized with "
+			          "a boolean constant.", name->c_str());
+
+			return false;
+		}
+
+		con_type = TOK_FCON;
+
+		if (cbt->bits == 32) {
+			// Fix: the lower bound of float's range is -FLT_MAX;
+			// FLT_MIN is the smallest positive normalized value, so
+			// the old "< FLT_MIN" test rejected all small and all
+			// negative in-range constants.
+			if (def.fcon > FLT_MAX || def.fcon < -FLT_MAX) {
+				yyerrorfl(name->file, name->line,
+				          "%s is out of range for a single-precision FP datum.",
+				          name->c_str());
+
+				return false;
+			}
+		}
+
+		const_init = true;
+		return true;
+	}
+
+	if (con_type == TOK_FCON) {
+		// Fix: arguments were swapped relative to the format string
+		// ("%s is of %s type" — name first, then type kind).
+		yyerrorfl(name->file, name->line,
+		          "%s is of %s type, but is initialized with "
+		          "a floating-point constant.",
+		          name->c_str(),
+		          cbt->flags.field.Bool ? "boolean" : "integral");
+
+		return false;
+	}
+
+	if (cbt->flags.field.Bool) {
+		if (con_type != TOK_BOOL) {
+			yyerrorfl(name->file, name->line,
+			          "%s is of boolean type, but is initialized with "
+			          "a non-boolean constant.", name->c_str());
+
+			return false;
+		}
+
+		assert(def.ucon == 0 || def.ucon == 1);
+
+		const_init = true;
+		return true;
+	}
+
+	if (con_type == TOK_BOOL) {
+		yyerrorfl(name->file, name->line,
+		          "%s is of integral type, but is initialized with "
+		          "a boolean constant.", name->c_str());
+
+		return false;
+	}
+
+	if (cbt->flags.field.Unsigned) {
+		// A negative signed constant can never fit an unsigned datum.
+		if (con_type == TOK_ICON && def.icon < 0) {
+			yyerrorfl(name->file, name->line,
+			          "Initializer for \"%s\" is out of range.",
+			          name->c_str());
+			return false;
+		}
+
+		if (cbt->bits < 64) {
+			uint64_t max = (1ULL << cbt->bits) - 1;
+
+			if (def.ucon > max) {
+				yyerrorfl(name->file, name->line,
+				          "Initializer for \"%s\" is out of range.",
+				          name->c_str());
+				return false;
+			}
+		}
+	} else {
+		// An unsigned constant that reads as negative when
+		// reinterpreted exceeds INT64_MAX.
+		if (con_type == TOK_UCON && def.icon < 0) {
+			yyerrorfl(name->file, name->line,
+			          "Initializer for \"%s\" is out of range.",
+			          name->c_str());
+			return false;
+		}
+
+		if (cbt->bits < 64) {
+			// Yes, the constant should be unsigned before conversion,
+			// as the temporary value could overflow a signed datum
+			// before the one is subtracted if bits == 63.  It won't
+			// matter on most machines, but you never know...
+
+			int64_t max = (1ULL << (cbt->bits - 1)) - 1;
+
+			// Fix: the valid range is [-max - 1, max]; the previous
+			// "< -max + 1" bound wrongly rejected the two lowest
+			// representable values.
+			if (def.icon > max || def.icon < -max - 1) {
+				yyerrorfl(name->file, name->line,
+				          "Initializer for \"%s\" is out of range.",
+				          name->c_str());
+				return false;
+			}
+		}
+	}
+
+	const_init = true;
+	return true;
+}
+
+// Record a constant initializer as soon as it is parsed.  Named-constant
+// references (TOK_DCON, pass 1 only) are remembered by name and resolved
+// later; literal values are stored immediately in def.ucon.
+void Datum::init_const_early(Con *con)
+{
+	if (!con)
+		return;
+
+	if (con->type == TOK_DCON) {
+		assert(current_pass == 1);
+		const_val_name = con->con.dcon;
+	}
+
+	def.flags.field.Const = 1;
+	con_type = con->type;
+	def.ucon = con->con.ucon;
+}
+
+// First-pass initialization for a datum whose type is given by name;
+// the actual type is resolved later by process_type().
+void Datum::init(StrList *type, Array *ARRAY, Con *con)
+{
+	assert(!complete);
+
+	cbt = NULL;
+	type_name = type;
+
+	complete = true;
+	basic = false;
+
+	set_array(ARRAY);
+	init_const_early(con);
+}
+
+// First-pass initialization for a datum with a built-in basic type; the
+// compiled type is copied into the definition, so no later name
+// resolution is required.
+void Datum::init(CompiledBasicType &CBT, Array *ARRAY, Con *con)
+{
+	assert(!complete);
+
+	def.basictype = CBT;
+	cbt = &def.basictype;
+
+	complete = true;
+	basic = true;
+
+	set_array(ARRAY);
+	init_const_early(con);
+}
+
+// Resolve this datum's declared type name (pass 4+).  On success either
+// cbt points at a compiled basic type (possibly an anonymous copy) or
+// the fully-qualified type name is recorded for non-basic types.
+// Throws UserError if the name does not resolve to a type, or if a
+// const datum has a non-basic type.
+void Datum::process_type()
+{
+	assert(current_pass >= 4);
+
+	// Already resolved (declared with a built-in basic type).
+	if (cbt)
+		return;
+
+	assert(!type_fq_name);
+	assert(type_sym);
+
+	Symbol *sym = type_sym->get_concrete_sym(false);
+	type = dynamic_cast<Type *>(sym);
+
+	if (!type) {
+		const String *str = type_name->back();
+		yyerrorfl(str->file, str->line, "\"%s\" is not a type.",
+		          sym->get_fq_name()->flatten()->c_str());
+
+		throw UserError();
+	}
+
+	BasicType *bt = dynamic_cast<BasicType *>(type->get_concrete_sym());
+
+	if (bt) {
+		// Anonymous basic types are copied inline; typedefs keep a
+		// reference to the shared definition.
+		if (!bt->def.flags.field.TypeDef)
+			use_anon_type(bt->def);
+		else
+			cbt = &bt->def;
+	} else if (const_val) {
+		const String *str = type_name->back();
+		yyerrorfl(str->file, str->line,
+		          "\"%s\" is not a basic type (and thus \"%s\" cannot "
+		          "be const).",
+		          type->get_fq_name()->flatten()->c_str(),
+		          name->c_str());
+		throw UserError();
+	}
+
+	if (!basic) {
+		type_fq_name = type->get_fq_name()->flatten();
+		def.type.length = type_fq_name->length();
+	}
+}
+
+// Follow a chain of named-constant initializers until a literal value
+// is found, copying the value back down the chain.  The "traversal"
+// stamp detects cycles; on a cycle the datum where the loop was
+// detected is returned so each caller can print one "...initializes"
+// line of the chain.  Returns NULL on success; throws UserError on a
+// non-datum/non-const reference or a failed verify_const().
+Datum *Datum::resolve_constant_chain()
+{
+	if (chain_traversed == traversal) {
+		yyerrorfl(name->file, name->line,
+		          "Initialization of \"%s\" forms an infinite loop:",
+		          get_fq_name()->flatten()->c_str());
+		return this;
+	}
+
+	chain_traversed = traversal;
+
+	assert(!const_val);
+	assert(!const_init);
+
+	process_type();
+
+	if (const_val_sym) {
+		const_val = dynamic_cast<Datum *>(const_val_sym->get_concrete_sym());
+
+		if (!const_val) {
+			const String *str = const_val_name->back();
+			yyerrorfl(str->file, str->line, "\"%s\" is not a datum.",
+			          const_val_sym->get_fq_name()->flatten()->c_str());
+			throw UserError();
+		}
+
+		if (!const_val->def.flags.field.Const) {
+			const String *str = const_val_name->back();
+			yyerrorfl(str->file, str->line, "\"%s\" is not a constant.",
+			          const_val->get_fq_name()->flatten()->c_str());
+			throw UserError();
+		}
+
+		// Recurse up the chain if the referenced constant has not been
+		// resolved yet.
+		if (!const_val->const_init) {
+			Datum *d = const_val->resolve_constant_chain();
+			if (d) {
+				yyerrorfl(name->file, name->line, "...initializes \"%s\"",
+				          get_fq_name()->flatten()->c_str());
+
+				// The loop has been fully reported once it closes.
+				if (d == this)
+					throw UserError();
+
+				return d;
+			}
+		}
+
+		assert(const_val->const_init);
+
+		con_type = const_val->con_type;
+		def.ucon = const_val->def.ucon;
+	}
+
+	if (!verify_const())
+		throw UserError();
+
+	assert(const_init);
+	return NULL;
+}
+
+// FIXME: Check for inline struct loops.
+// Final per-datum checks: propagate inline-ness from inline struct
+// types, flatten the array bounds into the compiled form, and validate
+// bitfield entry sizes against the type's minimum width.
+void Datum::final_analysis()
+{
+	Struct *str = dynamic_cast<Struct *>(*type);
+	if (str && str->is_inline())
+		set_inline();
+
+	if (array) {
+		array->final_analysis();
+		def.basictype.array = array->ca;
+	} else {
+		// No array declarator: encode as the empty [0, 0] bounds.
+		def.basictype.array.bounds[0] = 0;
+		def.basictype.array.bounds[1] = 0;
+	}
+
+	BitField *bfparent = dynamic_cast<BitField *>(get_ns());
+	if (bfparent) {
+		assert(!array);
+		int defsize;
+
+		// Minimum width in bits for this entry's type; plain
+		// (basic-typed) entries default to a single bit.
+		if (basic)
+			defsize = 1;
+		else
+			defsize = type->get_default_bf_size();
+
+		if (defsize == -1) {
+			yyerrorfl(name->file, name->line,
+			          "\"%s\" may not be the named type of bitfield entry "
+			          "\"%s\", as it is not a bitfield or enumeration.",
+			          type->get_fq_name()->flatten()->c_str(),
+			          get_fq_name()->flatten()->c_str());
+		} else if (def.icon == -1) {
+			// No explicit size was given; use the type's default.
+			def.ucon = defsize;
+		} else if (def.ucon < (unsigned int)defsize) {
+			yyerrorfl(name->file, name->line,
+			          "Explicit size %llu on \"%s\" is too small for "
+			          "type \"%s\", which has a minimum size of %u",
+			          def.ucon, get_fq_name()->flatten()->c_str(),
+			          type->get_fq_name()->flatten()->c_str(), defsize);
+		}
+	}
+}
+
+// Add a symbol to this struct.  Only data, aliases, types, and methods
+// are permitted.  Non-const data declared here (not imported) also
+// become marshalled entries of the struct.
+void Struct::add(Symbol *sym, bool from_import)
+{
+	Datum *datum = dynamic_cast<Datum *>(sym);
+
+	if (!datum && !dynamic_cast<Alias *>(sym) &&
+	    !dynamic_cast<Type *>(sym) &&
+	    !dynamic_cast<Method *>(sym))
+		throw InvalidArgument();
+
+	NameSpace::add(sym, from_import);
+
+	if (!from_import && datum && !datum->def.flags.field.Const)
+		add_elem(datum);
+}
+
+// Append a non-const datum to the struct's member list, keeping the
+// compiled entry count in sync.
+void Struct::add_elem(Datum *d)
+{
+	def.num_entries++;
+	entries.push_back(d);
+}
+
+// Create a method parameter and attach it to its parent, which must be
+// a Method.  Returns the newly declared Param.
+Param *Param::declare(const String *name, NameSpace *parent,
+                      StrList *TYPE, CompiledParam::Flags flags,
+                      Array *array)
+{
+	assert(parent);
+	assert(dynamic_cast<Method *>(parent));
+
+	Param *p = new Param(name);
+	p->init(TYPE, flags, array);
+
+	parent->add_user(p);
+	return p;
+}
+
+// Create a method and attach it to its parent, which must be an
+// Interface.  Parameters are added separately via Method::add().
+Method *Method::declare(const String *name, NameSpace *parent)
+{
+	assert(parent);
+	assert(dynamic_cast<Interface *>(parent));
+
+	Method *m = new Method(name);
+
+	parent->add_user(m);
+	return m;
+}
+
+// Add a symbol to this interface.  Methods, aliases, types, and const
+// data are permitted; non-const data are rejected.  Methods declared
+// here (not imported) are also registered in the method table.
+void Interface::add(Symbol *sym, bool from_import)
+{
+	Datum *datum = dynamic_cast<Datum *>(sym);
+	Method *method = dynamic_cast<Method *>(sym);
+
+	if (!datum && !method &&
+	    !dynamic_cast<Alias *>(sym) &&
+	    !dynamic_cast<Type *>(sym))
+		throw InvalidArgument();
+
+	if (datum && !datum->def.flags.field.Const)
+		throw InvalidArgument();
+
+	NameSpace::add(sym, from_import);
+
+	if (!from_import && method)
+		add_elem(method);
+}
+
+// FIXME: check for duplicate and looping inheritance
+// Append a method to this interface's method table, keeping the
+// compiled method count in sync.
+void Interface::add_elem(Method *method)
+{
+	methods.push_back(method);
+	def.num_methods++;
+}
+
+// Add a symbol to this enumeration; only data are allowed, and each
+// non-imported datum becomes an enumerator entry.
+void Enum::add(Symbol *sym, bool from_import)
+{
+	Datum *datum = dynamic_cast<Datum *>(sym);
+
+	// It also must be const unsigned, but the const won't
+	// have been initialized yet.
+
+	if (!datum)
+		throw InvalidArgument();
+
+	NameSpace::add(sym, from_import);
+
+	if (!from_import)
+		add_elem(datum);
+}
+
+// Append an enumerator datum, keeping the compiled entry count in sync.
+void Enum::add_elem(Datum *datum)
+{
+	entries.push_back(datum);
+	def.num_entries++;
+}
+
+// Add a symbol to this bitfield.  Data, nested enums, and nested
+// bitfields are permitted; non-imported data become entries.
+void BitField::add(Symbol *sym, bool from_import)
+{
+	Datum *datum = dynamic_cast<Datum *>(sym);
+
+	if (!datum && !dynamic_cast<Enum *>(sym) &&
+	    !dynamic_cast<BitField *>(sym))
+		throw InvalidArgument();
+
+	NameSpace::add(sym, from_import);
+
+	if (!from_import && datum)
+		add_elem(datum);
+}
+
+// Append a bitfield entry, keeping the compiled entry count in sync.
+void BitField::add_elem(Datum *datum)
+{
+	entries.push_back(datum);
+	def.num_entries++;
+}
+
+// Add a symbol to this method; only parameters are allowed, and each
+// non-imported parameter joins the parameter list in order.
+void Method::add(Symbol *sym, bool from_import)
+{
+	Param *param = dynamic_cast<Param *>(sym);
+
+	if (!param)
+		throw InvalidArgument();
+
+	NameSpace::add(sym, from_import);
+	if (!from_import)
+		add_elem(param);
+}
+
+// Append a parameter, keeping the compiled entry count in sync.
+void Method::add_elem(Param *param)
+{
+	entries.push_back(param);
+	def.num_entries++;
+}
+
+// Use this rather than lookup_sym if you want to check for built-in types.
+// If only constructed types are valid, use lookup_sym. If basic_types_only
+// is set, then NULL will be returned if sl does not describe a built-in
+// basic type (such as int, flong, octet, etc). This option is used so that
+// the first-pass code in idlparse.y can decide whether to create a BasicType
+// or an Alias/TypeDef.
+
+// Resolve a (possibly qualified) type name, accepting built-in basic
+// types as well as constructed types.  If basic_types_only is set,
+// returns NULL unless sl names a built-in basic type — used by the
+// first pass to decide between creating a BasicType or an
+// Alias/TypeDef.  Throws UserError if the name resolves to a non-type.
+Type *lookup_type(StrList *sl, NameSpace *ctx, bool basic_types_only)
+{
+	Type *type;
+	int token = sl->front()->token;
+
+	// These tokens aren't valid types, but the names can be
+	// used as identifiers.
+	switch (token) {
+		case TOK_ASYNC:
+		case TOK_INOUT:
+		case TOK_OUT:
+			token = TOK_IDENT;
+	}
+
+	// Treat it as a user-defined type if it's either not a reserved word,
+	// or if there are multiple components.
+
+	if (token == TOK_IDENT || sl->front() != sl->back()) {
+		// Fix: this function returns Type *; "return false" relied on
+		// an ill-formed bool-to-pointer conversion.
+		if (basic_types_only)
+			return NULL;
+
+		Symbol *sym = lookup_sym(toplevel, sl, ctx);
+		type = dynamic_cast<Type *>(sym->get_concrete_sym(false));
+
+		if (!type) {
+			const String *str = sl->back();
+			yyerrorfl(str->file, str->line, "\"%s\" is not a type.",
+			          sym->get_fq_name()->flatten()->c_str());
+
+			throw UserError();
+		}
+	} else {
+		// A single reserved word: build the compiled form directly.
+		CompiledBasicType cbt;
+		memset(&cbt, 0, sizeof(cbt));
+
+		switch (token) {
+			case TOK_BOOL:
+				cbt.bits = 0;
+				cbt.flags.field.Bool = 1;
+				break;
+
+			case TOK_OCTET:
+			case TOK_CHAR:
+				cbt.bits = 8;
+				cbt.flags.field.Unsigned = 1;
+				break;
+
+			case TOK_SHORT:
+				cbt.bits = 16;
+				break;
+
+			case TOK_INT:
+				cbt.bits = 32;
+				break;
+
+			case TOK_LONG:
+				cbt.bits = 64;
+				break;
+
+			case TOK_USHORT:
+				cbt.bits = 16;
+				cbt.flags.field.Unsigned = 1;
+				break;
+
+			case TOK_UINT:
+				cbt.bits = 32;
+				cbt.flags.field.Unsigned = 1;
+				break;
+
+			case TOK_ULONG:
+				cbt.bits = 64;
+				cbt.flags.field.Unsigned = 1;
+				break;
+
+			case TOK_FSHORT:
+				cbt.bits = 32;
+				cbt.flags.field.Float = 1;
+				break;
+
+			case TOK_FLONG:
+				cbt.bits = 64;
+				cbt.flags.field.Float = 1;
+				break;
+
+			default:
+				fprintf(stderr, "token %d\n", token);
+				BUG();
+		}
+
+		type = BasicType::declare(sl->front(), NULL, cbt);
+	}
+
+	return type;
+}
+
+// Declare one datum per name in "ids" with the given type and array
+// declarator, applying any "inline"/"immutable" attributes.  Unknown
+// attributes and per-name declaration errors are reported but do not
+// stop processing of the remaining names.
+void declare_data(NameSpace *ns, StrList *ids, StrList *type,
+                  Array *array, StrList *attr)
+{
+	bool is_inline = false;
+	bool is_immutable = false;
+
+	if (attr) {
+		for (StrList::iterator i = attr->begin(); i != attr->end(); ++i) {
+			const String *this_attr = *i;
+
+			switch (this_attr->token) {
+				case TOK_INLINE:
+					is_inline = true;
+					break;
+
+				case TOK_IMMUTABLE:
+					is_immutable = true;
+					break;
+
+				default:
+					yyerrorfl(this_attr->file, this_attr->line,
+					          "Invalid attribute \"%s\"", (*i)->c_str());
+			}
+		}
+	}
+
+	for (StrList::iterator i = ids->begin(); i != ids->end(); ++i) {
+		try {
+			Datum *d = Datum::declare(*i, ns, type, array);
+
+			if (is_inline)
+				d->set_inline();
+
+			if (is_immutable)
+				d->set_immutable();
+		}
+
+		catch (UserError) {
+			// An error has already been displayed, but try to
+			// continue and find more errors.
+		}
+	}
+}
+
+// Declare each name in "ids" as an alias (or, if is_typedef, a typedef)
+// of the named type.  Errors are reported per-name and do not stop
+// processing of the remaining names.
+void declare_aliases(NameSpace *ns, StrList *ids, StrList *type,
+                     bool is_typedef)
+{
+	StrList::iterator i;
+
+	for (i = ids->begin(); i != ids->end(); ++i) {
+		try {
+			if (is_typedef)
+				TypeDef::declare(*i, ns, type);
+			else
+				Alias::declare(*i, ns, type);
+		}
+
+		catch (UserError) {
+			// An error has already been displayed, but try to
+			// continue and find more errors.
+		}
+	}
+}
+
+// Declare each name in "ids" as a copy of the given built-in basic
+// type, marking the copies as typedefs when requested.  Errors are
+// reported per-name and do not stop processing.
+void declare_basictypes(NameSpace *ns, StrList *ids,
+                        BasicType *type, bool is_typedef)
+{
+	assert(!type->def.flags.field.TypeDef);
+	StrList::iterator i;
+
+	for (i = ids->begin(); i != ids->end(); ++i) {
+		try {
+			BasicType *bt =
+				BasicType::declare(*i, ns, type->def);
+
+			bt->def.flags.field.TypeDef = is_typedef;
+		}
+
+		catch (UserError) {
+			// An error has already been displayed, but try to
+			// continue and find more errors.
+		}
+	}
+}
+
+// Array declarator.  Both bounds default to the literal constant 0
+// until set_bound()/set_unbounded() is called; lookup_ctx is the
+// namespace later used to resolve named (TOK_DCON) bounds.
+Array::Array(NameSpace *LOOKUP_CTX)
+{
+	lookup_ctx = LOOKUP_CTX;
+
+	for (int i = 0; i < 2; i++) {
+		cons[i].con.ucon = 0;
+		cons[i].type = TOK_UCON;
+	}
+}
+
+// Mark both bounds absent (TOK_NONE); final_analysis() turns this into
+// the [0, -1] "unbounded" encoding.
+void Array::set_unbounded()
+{
+	for (int i = 0; i < 2; i++)
+		cons[i].type = TOK_NONE;
+}
+
+// Resolve the array bounds to concrete values.  Absent bounds become
+// 0 (lower) / -1 (upper = unbounded); named bounds are looked up in
+// lookup_ctx and must be non-negative integral constants; finally the
+// lower bound must not exceed a finite upper bound.
+void Array::final_analysis()
+{
+	for (int i = 0; i < 2; i++) {
+		if (cons[i].type == TOK_NONE) {
+			if (i == 0)
+				ca.bounds[0] = 0;
+			else
+				ca.bounds[1] = -1;
+
+			continue;
+		}
+
+		if (cons[i].type == TOK_DCON) {
+			// Named bound: resolve to a Datum and take its value.
+			Symbol *sym = lookup_sym(toplevel, dcons[i], lookup_ctx);
+			datums[i] = dynamic_cast<Datum *>(sym);
+			if (!datums[i]) {
+				const String *str = dcons[i]->back();
+				yyerrorfl(str->file, str->line, "\"%s\" is not a Datum.",
+				          sym->get_fq_name()->flatten()->c_str());
+				continue;
+			}
+
+			ca.bounds[i] = datums[i]->get_ucon(dcons[i]->back());
+			cons[i].type = datums[i]->con_type;
+		} else {
+			ca.bounds[i] = cons[i].con.ucon;
+		}
+
+		if (cons[i].type == TOK_FCON) {
+			yyerrorfl(strs[i]->file, strs[i]->line,
+			          "\"%s\" is not an integral Datum.",
+			          strs[i]->c_str());
+			throw UserError();
+		}
+
+		// A negative bound here means either a genuinely negative
+		// constant or an unsigned value too large for int64.
+		if (ca.bounds[i] < 0) {
+			yyerrorfl(strs[i]->file, strs[i]->line,
+			          "\"%s\" %s.", strs[i]->c_str(),
+			          cons[i].type == TOK_UCON ?
+			          "does not fit in a signed 64-bit integer" :
+			          "is negative");
+			throw UserError();
+		}
+	}
+
+	if (ca.bounds[1] >= 0 && ca.bounds[0] > ca.bounds[1]) {
+		yyerrorfl(strs[0]->file, strs[0]->line,
+		          "\"%s\" is larger than \"%s\".",
+		          strs[0]->c_str(), strs[1]->c_str());
+		throw UserError();
+	}
+}
+
+// Record one array bound (bound 0 = lower, 1 = upper) from a parsed
+// constant during pass 1.  Floating-point and non-positive bounds are
+// rejected immediately; a textual form of the bound is kept in strs[]
+// for later diagnostics.
+void Array::set_bound(Con &con, int bound)
+{
+	assert(current_pass == 1);
+
+	if (con.type == TOK_FCON) {
+		yyerrorf("Array limits must be integers.");
+		throw UserError();
+	}
+
+	if (con.type == TOK_ICON && con.con.icon <= 0) {
+		yyerrorf("Array limits must be positive");
+		throw UserError();
+	}
+
+	// An unsigned constant that reads as non-positive when treated as
+	// signed exceeds the signed 64-bit range.
+	if (con.type == TOK_UCON && con.con.icon <= 0) {
+		yyerrorf("Array limits must fit in a signed 64-bit integer");
+		throw UserError();
+	}
+
+	cons[bound] = con;
+
+	if (con.type == TOK_DCON) {
+		// Named bound: keep the name for later resolution.
+		assert(con.con.dcon);
+		dcons[bound] = con.con.dcon;
+		strs[bound] = con.con.dcon->flatten();
+	} else {
+		std::ostringstream ss(std::ostringstream::out);
+
+		switch (con.type) {
+			case TOK_FCON:
+				// NOTE(review): unreachable — FCON was rejected above;
+				// kept for defensive completeness.
+				ss << con.con.fcon;
+				break;
+
+			case TOK_UCON:
+				ss << con.con.ucon;
+				break;
+
+			case TOK_ICON:
+				ss << con.con.icon;
+				break;
+
+			case TOK_NONE:
+				if (bound == 0)
+					ss << "0";
+
+				break;
+
+			default:
+				BUG();
+		}
+
+		strs[bound] = new String(ss.str().c_str(), cur_input_file,
+		                         curline, con.type);
+	}
+}
+
+
+// output_lang dispatchers: double-dispatch each symbol kind to the
+// language backend callback.  Methods and params produce no top-level
+// output (they are emitted as part of their parent), and data are only
+// emitted when const.
+void UserNameSpace::output_lang(LangCallback *lcb, int arg1, void *arg2)
+{
+	lcb->output(this, arg1, arg2);
+}
+
+void BasicType::output_lang(LangCallback *lcb, int arg1, void *arg2)
+{
+	lcb->output(this, arg1, arg2);
+}
+
+void Interface::output_lang(LangCallback *lcb, int arg1, void *arg2)
+{
+	lcb->output(this, arg1, arg2);
+}
+
+void Struct::output_lang(LangCallback *lcb, int arg1, void *arg2)
+{
+	lcb->output(this, arg1, arg2);
+}
+
+void BitField::output_lang(LangCallback *lcb, int arg1, void *arg2)
+{
+	lcb->output(this, arg1, arg2);
+}
+
+void Enum::output_lang(LangCallback *lcb, int arg1, void *arg2)
+{
+	lcb->output(this, arg1, arg2);
+}
+
+void Alias::output_lang(LangCallback *lcb, int arg1, void *arg2)
+{
+	lcb->output(this, arg1, arg2);
+}
+
+void TypeDef::output_lang(LangCallback *lcb, int arg1, void *arg2)
+{
+	lcb->output(this, arg1, arg2);
+}
+
+void Datum::output_lang(LangCallback *lcb, int arg1, void *arg2)
+{
+	if (def.flags.field.Const)
+		lcb->output(this, arg1, arg2);
+}
+
+void Method::output_lang(LangCallback *lcb, int arg1, void *arg2)
+{
+}
+
+void Param::output_lang(LangCallback *lcb, int arg1, void *arg2)
+{
+}
+
+int Interface::all_supers_traversal;
--- /dev/null
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <cctype>
+
+#include <idlc.h>
+#include <util.h>
+
+// Emit ind.indent_level tab characters followed by ind.align_spaces
+// spaces, so an Indent value can be streamed into generated output.
+// Non-positive counts emit nothing, exactly as before.
+std::ostream &operator << (std::ostream &ostr, Indent &ind)
+{
+	int n;
+
+	for (n = ind.indent_level; n > 0; n--)
+		ostr << '\t';
+
+	for (n = ind.align_spaces; n > 0; n--)
+		ostr << ' ';
+
+	return ostr;
+}
+
+// Return true iff the directory contains no entries other than "." and
+// "..".  Prints a diagnostic and throws UserError if the directory
+// cannot be opened or read.
+static bool check_empty(const char *dirname)
+{
+	struct dirent ent, *entp;
+
+	DIR *dir = opendir(dirname);
+	if (!dir) {
+		fprintf(stderr, "Cannot open directory \"%s\".\n",
+		        dirname);
+
+		throw UserError();
+	}
+
+	do {
+		// readdir_r is buggy on osx 10.2, and will fail if errno is
+		// non-zero.
+		errno = 0;
+
+		// NOTE(review): tolerating EEXIST here appears to be part of
+		// the osx workaround above — confirm it is still needed.
+		if (readdir_r(dir, &ent, &entp) && errno != EEXIST) {
+			fprintf(stderr, "2 Cannot readdir on \"%s\": %s.\n",
+			        dirname, strerror(errno));
+
+			closedir(dir);
+			throw UserError();
+		}
+	} while (entp && (!strcmp(ent.d_name, ".") ||
+	                  !strcmp(ent.d_name, "..")));
+
+	closedir(dir);
+
+	// entp is NULL once the directory has been exhausted; reaching the
+	// end without seeing a real entry means the directory is empty.
+	if (!entp)
+		return true;
+
+	return false;
+}
+
+// Create the directory _path, including any missing parents, one "/"
+// separated component at a time.  EEXIST from mkdir is tolerated;
+// any other failure (or a non-empty final directory when require_empty
+// is set) prints a diagnostic and throws UserError.
+void makepath(const char *_path, bool require_empty)
+{
+	string path(_path);
+	string::size_type pos = 0, last = 0;
+
+	while (pos != string::npos) {
+		pos = path.find('/', pos + 1);
+		// component is the path prefix up to (not including) this '/'.
+		string component(path, 0, pos);
+
+		// Skip empty components (e.g. "//" or a leading '/').
+		if (component.length() - last == 1) {
+			last = pos;
+			continue;
+		}
+
+		last = pos;
+
+		if (mkdir(component.c_str(), 0777) && errno != EEXIST) {
+			fprintf(stderr, "Cannot create directory \"%s\": %s.\n",
+			        component.c_str(), strerror(errno));
+
+			throw UserError();
+		}
+	}
+
+	if (require_empty && !check_empty(_path)) {
+		fprintf(stderr, "Directory \"%s\" is not empty.\n", _path);
+		throw UserError();
+	}
+}
+
+// Convert one ASCII hex digit ('0'-'9', 'A'-'F', 'a'-'f') to its value
+// 0-15.  The caller guarantees isxdigit(); the fall-through ordering
+// (lowercase, uppercase, digit) relies on their ASCII ordering.
+static inline int hex_to_bin(int hex)
+{
+	if (hex >= 'a')
+		return hex - ('a' - 10);
+
+	if (hex >= 'A')
+		return hex - ('A' - 10);
+
+	return hex - '0';
+}
+
+// Parse an ASCII GUID of the exact form 8-4-4-4-12 hex digits into 16
+// binary bytes at guid_bin.  Prints a diagnostic and throws UserError
+// on any malformed input (wrong characters, wrong dashes, wrong length).
+void parse_guid(const char *guid_str, char *guid_bin)
+{
+	bool second_hex_digit = false;
+	int i;
+
+	for (i = 0; i < 36; i++) {
+		// Positions 8, 13, 18, 23 must hold the separating dashes.
+		if (i == 8 || i == 13 || i == 18 || i == 23) {
+			if (guid_str[i] != '-')
+				goto bad;
+		} else {
+			if (!isxdigit(guid_str[i]))
+				goto bad;
+
+			// Pack two hex digits per output byte, high nibble first.
+			if (second_hex_digit) {
+				*guid_bin++ |= hex_to_bin(guid_str[i]);
+				second_hex_digit = false;
+			} else {
+				*guid_bin = hex_to_bin(guid_str[i]) << 4;
+				second_hex_digit = true;
+			}
+		}
+	}
+
+	// The string must end exactly at 36 characters.
+	if (guid_str[i] == 0)
+		return;
+
+bad:
+	yyerrorf("\"%s\" is not a valid GUID.", guid_str);
+	throw UserError();
+}
--- /dev/null
+#ifndef UTIL_H
+#define UTIL_H
+
+#include <iostream>
+#include <stdint.h>
+
+// Feed an indent struct into an ostream to get "indent_level" tabs
+// and "align_spaces" spaces.
+
+struct Indent {
+	int indent_level;	// number of tab characters to emit
+	int align_spaces;	// number of spaces to emit after the tabs
+};
+
+std::ostream &operator << (std::ostream &ostr, Indent &ind);
+
+// Creates the specified directory, including all parent directories that
+// do not yet exist. Gives the user an error if any component could not
+// be created, or (if require_empty is set) if the final directory is not
+// empty.
+
+void makepath(const char *path, bool require_empty = true);
+
+// Turns an ASCII GUID into a binary GUID. Prints a message
+// and throws a UserError if the ASCII GUID is invalid.
+// guid_bin must point to a 16-byte region of memory.
+
+void parse_guid(const char *guid_str, char *guid_bin);
+
+#endif
--- /dev/null
+#include <orb.h>
+
+// One entry in an object's interface table: the interface's GUID and
+// the offset added to an object pointer to reach that interface (see
+// RunTime::downcast below, which scans a NULL-guid-terminated list).
+struct IFaceTable {
+	unsigned char *guid;
+	ptrdiff_t offset;
+};
+
+namespace System {
+ struct _i_Object {
+ static const unsigned char guid[];
+
+ struct info_type {
+ // The concrete field is used by the ORB to locate the
+ // object ID (stored one word before the concrete pointer).
+ // The concrete field is zero except where multiple
+ // inheritance causes there to be multiple table pointers in
+ // the object. When that happens, the concrete field is the
+ // number of bytes (either 0 or negative) to add to the
+ // pointer to yield a pointer to the most derived interface
+ // (of which there can be only one).
+
+ ptrdiff_t concrete;
+
+ // This is a pointer to the most derived interface's
+ // interface table, for use by a downcast method that does
+ // not know the real type yet.
+
+ ::System::RunTime::IFaceTable *concrete_IFaceTable;
+
+ // A list of interfaces follows. Each entry in the list
+ // consists of a pointer to the GUID for the entry's
+ // interface, followed by an offset to add to a pointer of
+ // this interface to obtain a pointer to this entry's
+ // interface. This list is scanned during downcast()
+ // operations. The entries are used directly (no scanning)
+ // for upcast operations, and the first entry (which must be
+ // the for this interface) is used by the ORB to verify the
+ // GUID of objects passed.
+ //
+ // The offsets must be dynamic, because an interface can
+ // only have one pointer regardless of how many times it is
+ // inherited by an interface's parents, and thus the pointer
+ // adjustment will not be the same for all pointers of the
+ // same inner interface.
+ //
+ // For now, this table is ordered by the for_all_supers
+ // traversal, but eventually it'd be nice to sort it
+ // by GUID pointer (and yes, that change will break ABI
+ // compatibility).
+
+ ::System::RunTime::IFaceTable System_IDLIDL_Object;
+
+ // This is a NULL pointer to mark the end of the GUID
+ // list.
+ unsigned char *end;
+
+ // Finally, the method pointers are provided.
+ // There are currently no methods in System::Object.
+
+ struct methods {
+ } methods;
+ };
+
+ const info_type *info;
+ };
+
+ class Object {
+ _i_Object *_ptr;
+
+ public:
+ Object()
+ {
+ _ptr = NULL;
+ }
+
+ Object(_i_Object *other)
+ {
+ _ptr = other;
+ }
+
+ operator _i_Object *()
+ {
+ return _ptr;
+ }
+ };
+
+
	// Design sketch of the runtime support namespace; the version
	// that is actually built lives in cpp/orb.h.
	namespace RunTime {
		// Non-owning pointer/length pair with optional bounds checks.
		template<typename T> struct Array {
			T *ptr;
			size_t count;

			// True iff index lies within [0, count).
			bool valid_index(size_t index)
			{
				return index >= 0 && index < count;
			}

			T &operator[](size_t index)
			{
#ifndef NO_ARRAY_BOUNDS_CHECK
				// NOTE(review): SomeException is a placeholder; the
				// real binding throws an IDL ArrayException.
				if (!valid_index(index))
					throw SomeException();
#endif

				return ptr[index];
			}
		};

		// Scan the object's concrete interface table for new_guid and
		// return the adjusted pointer for that interface, or NULL if
		// the object does not implement it.
		//
		// NOTE(review): sketch code -- returns NULL from a uintptr_t
		// function (should be 0), found_index is unused, and the
		// closing braces carry stray semicolons.
		uintptr_t downcast(::System::_i_Object *obj, unsigned char *new_guid)
		{
			IFaceTable *tbl = obj->info->concrete_IFaceTable;
			int index = 0, found_index = -1;

			// Linear scan; the table is terminated by a NULL guid.
			while (tbl->guid != new_guid) {
				if (!tbl->guid)
					return NULL;

				tbl++;
			}

			uintptr_t ptr = (uintptr_t)obj;

			ptr += obj->info->concrete;
			ptr += tbl->offset;

			return ptr;
		};
	};
+
	// Base type for notification payloads; concrete notifier info
	// structs (e.g. System::IO::NotifierInfo below) derive from this.
	struct NotifierInfo {
	};
+};
+
+namespace System {
+namespace IO {
	// Payload delivered when an asynchronous I/O operation completes.
	struct NotifierInfo : public ::System::NotifierInfo {
		// Number of bytes actually transferred.
		uint64_t len;

		// Yuck.  Hack to work around broken enum-scoping and
		// undefined enum-width: a struct of named constants plus an
		// explicit 32-bit value field stands in for an enum.
		struct result {
			static const uint32_t Success = 0;
			static const uint32_t NoMoreData = 1;
			static const uint32_t NoMoreSpace = 2;
			static const uint32_t IOError = 3;
			static const uint32_t BadHandle = 4;
			static const uint32_t PipeClosed = 5;

			// Double yuck... Is there any sane way to avoid exposing
			// a leading underscore to the user, without potential conflicts
			// with enum values?  Perhaps "enumval" could be made a reserved
			// word...
			uint32_t _val;
		} result;
	};
+
+ // Note the absence of "virtual"... We don't want to use the
+ // compiler's virtual table implementation, as we can't rely on it
+ // to be compatible with any other language or compiler (or even
+ // other versions of itself), and we also need the object ID to be
+ // at a specific offset from the pointer. We manage all virtual
+ // table operations (including RTTI) ourselves. Note that it is
+ // an error if the compiler emits a virtual table (or anything
+ // else not asked for) for any reason.
+
+ // Unfortunately, this could deprive some compilers of optimization
+ // information (such as for inlining when the concrete type is known
+ // and the implementation is present in the same compilation unit).
+ // A compiler which explicitly supports our standard vtable layout
+ // could use alternate stub code that makes use of such support.
+
	// Low-level representation of an IStream reference: the per-class
	// info structure holding the interface table and manually managed
	// method slots (no compiler vtable; see the comment above).
	struct _i_IStream {
		// 128-bit interface GUID.
		static const unsigned char guid[];

		struct info_type {
			// The info struct from the closest superinterface
			// which shares this pointer is included first.
			::System::_i_Object::info_type parent;

			// One IFaceTable entry per implemented interface,
			// NULL-terminated by _end.
			struct iface_table {
				IFaceTable System_IDLIDL_IO_IDLIDL_IStream;
				IFaceTable System_IDLIDL_Object;
				unsigned char *_end;
			} iface_table;

			// Method dispatch slots, in declaration order.
			struct methods {
				void (*read)(::System::IO::_i_IStream *_this,
					Array<char> *buf, uint64_t *len);

				void (*read_async)(::System::IO::_i_IStream *_this,
					Array<unsigned char> *buf,
					::System::Notifier *notifier);
			} methods;
		} info;
	};
+
	// User-visible stub class for the IStream interface.
	// NOTE(review): sketch code -- "unsigned char &*buf" is not valid
	// C++ (apparently shorthand for a reference-to-pointer out
	// parameter), and the stubs pass "this" rather than _ptr to the
	// method slots.
	class IStream {
		_i_IStream *_ptr;

	public:
		void read(unsigned char &*buf, size_t &_count_buf, uint64_t &len)
		{
			_ptr->info->methods.read(this, buf, _count_buf, len);
		};

		void read_async(unsigned char *buf, size_t _count_buf,
			::System::Notifier *notifier)
		{
			_ptr->info->methods.read_async(this, buf, _count_buf, notifier);
		};

		// Null reference by default.
		IStream()
		{
			_ptr = NULL;
		}

		IStream(_i_IStream *other)
		{
			_ptr = other;
		}

		operator _i_IStream *()
		{
			return _ptr;
		}

		// Returns NULL if the downcast fails.  Upcasts never fail.
		// "downcast" is (will be) a reserved word in IDL, or at least
		// in the C++ binding.

		static IStream downcast(::System::Object oldptr)
		{
			return IStream(reinterpret_cast<_i_IStream *>
				(::System::RunTime::downcast(oldptr, _i_IStream::guid)));
		}

		// Implicit upcast to the root Object interface, adjusting the
		// pointer by the offset recorded in the interface table.
		operator ::System::Object()
		{
			uintptr_t ptr = reinterpret_cast<uintptr_t>(this);
			ptr += _ptr->info->System_IDLIDL_Object.offset;
			return ::System::Object(reinterpret_cast<::System::_i_Object *>(ptr));
		}
	};
+
	// Earlier sketch of OStream using separate guid/cast tables
	// instead of the combined IFaceTable above.  NOTE(review): sketch
	// code -- "info" is declared by value but dereferenced with "->"
	// below, and one method slot names its parameter "this".
	struct OStream {
		static const unsigned char guid[];

		struct info_type {
			::System::Object::info parent;

			// GUIDs of all implemented interfaces, NULL-terminated.
			struct guid_table {
				unsigned char *System_IDLIDL_IO_IDLIDL_OStream;
				unsigned char *System_IDLIDL_Object;
				unsigned char *_end;
			} guids;

			// Pointer adjustments matching the guid table entries.
			struct cast_table {
				int32_t System_IDLIDL_IO_IDLIDL_OStream;
				int32_t System_IDLIDL_Object;
			} casts;

			struct methods {
				void (*write)(::System::IO::_i_OStream *_this, unsigned char *buf,
					size_t *_count_buf, uint64_t *len);

				void (*write_async)(::System::IO::_i_OStream *this, unsigned char *buf,
					size_t count_buf, ::System::Notifier *notifier);
			} methods;
		};

		const info_type info;

		void write(unsigned char *buf, size_t _count_buf, uint64_t &len)
		{
			info->methods.write(this, buf, _count_buf, len);
		};

		void write_async(unsigned char *buf, size_t _count_buf,
			::System::Notifier *notifier)
		{
			info->methods.write_async(this, buf, _count_buf, notifier);
		};

		static ::System::IO::OStream *downcast(::System::Object *obj)
		{
			return (::System::IO::OStream *)
				::System::RunTime::downcast(obj, ::System::IO::OStream::guid);
		}

		// Adjust this pointer up to the Object base using the cast table.
		::System::Object *upcast(::System::Object *type)
		{
			uintptr_t ptr = (uintptr_t)this;
			ptr += info->casts.System_IDLIDL_Object;
			return (::System::Object *)ptr;
		}
	};
+
+
	// Combined stream: shares its info pointer with the left-most
	// superinterface (IStream) and keeps a second info pointer for
	// the OStream half.  NOTE(review): sketch code -- the info_type
	// definition is missing its trailing semicolon, one slot names
	// its parameter "this", and "_i_OStream::info" names a member,
	// not a type.
	struct _i_IOStream {
		static const unsigned char guid[];

		struct info_type {
			// Share a pointer with the left-most superinterface
			::System::IO::_i_IStream::info parent;

			struct guid_table {
				unsigned char *System_IDLIDL_IO_IDLIDL_IOStream;
				unsigned char *System_IDLIDL_IO_IDLIDL_IStream;
				unsigned char *System_IDLIDL_IO_IDLIDL_OStream;
				unsigned char *System_IDLIDL_Object;
				unsigned char *_end;
			} guids;

			struct cast_table {
				int32_t System_IDLIDL_IO_IDLIDL_IOStream;
				int32_t System_IDLIDL_IO_IDLIDL_IStream;
				int32_t System_IDLIDL_IO_IDLIDL_OStream;
				int32_t System_IDLIDL_Object;
			} casts;

			// Slots for both parents' methods, IStream first.
			struct methods {
				void (*read)(::System::IO::_i_IStream *_this,
					Array<char> *buf, uint64_t *len);

				void (*read_async)(::System::IO::_i_IStream *_this,
					Array<unsigned char> *buf,
					::System::Notifier *notifier);

				void (*write)(::System::IO::_i_OStream *_this, unsigned char *buf,
					size_t *_count_buf, uint64_t *len);

				void (*write_async)(::System::IO::_i_OStream *this, unsigned char *buf,
					size_t count_buf, ::System::Notifier *notifier);
			} methods;
		}

		const info_type *info;

		::System::_i_OStream::info *info_System_IDLIDL_OStream;
	};
+
	// Stub class for the combined stream.  NOTE(review): sketch code
	// -- "unsigned char &*buf" is invalid, the bodies reference
	// "info" where the member is _ptr->info, and "::System::Ostream"
	// is presumably a typo for OStream.
	class IOStream {
		_i_IOStream *_ptr;

	public:
		void read(unsigned char &*buf, size_t &_count_buf, uint64_t &len)
		{
			info->parent.methods.read(this, buf, _count_buf, len);
		};

		void read_async(unsigned char *buf, size_t _count_buf,
			::System::Notifier *notifier)
		{
			info->parent.methods.read_async(this, buf, _count_buf, notifier);
		};

		// Writes dispatch through the secondary OStream info pointer.
		void write(unsigned char *buf, size_t _count_buf, uint64_t &len)
		{
			::System::OStream *thisptr = upcast((::System::Ostream *)NULL);
			info_System_IDLIDL_OStream.methods.write(this, buf, _count_buf, len);
		};

		void write_async(unsigned char *buf, size_t _count_buf,
			::System::Notifier *notifier)
		{
			info_System_IDLIDL_OStream.methods.write_async
				(this, buf, _count_buf, notifier);
		};

		static ::System::IO::OStream *downcast(::System::Object *obj)
		{
			return (::System::IO::OStream *)
				::System::RunTime::downcast(obj, ::System::IO::OStream::guid);
		}

		::System::Object *upcast(::System::Object *type)
		{
			uintptr_t ptr = (uintptr_t)this;
			ptr += info->casts.System_IDLIDL_Object;
			return (::System::Object *)ptr;
		}
	};
+
+
+
+
+
+
	// Abbreviated sketches using plain C++ inheritance, for contrast
	// with the manual-vtable layout above.  NOTE(review): "= 0"
	// without "virtual" and "File &*f" are not valid C++; shorthand
	// only.
	struct OStream : public ::System::Object {
		void write(unsigned char *buf, size_t _count_buf, uint64_t &len) = 0;
	};

	struct IOStream : public IStream, OStream {
	};

	struct FStream : public IOStream {
		void set_file(File *f) = 0;
		void get_file(File &*f) = 0;
		// Convenience wrapper returning the out parameter directly.
		File *get_file() {
			File *ret;
			get_file(ret);
			return ret;
		};
	};
+
+
+
+
+
+
	// Sketches for the memory-mapping interfaces.
	class PhysAddrSpace {
	public:
	};

	// A range of an address space starting at "start", "len" bytes long.
	struct Region {
		uint64_t start, len;
	};

	// NOTE(review): sketch code -- the constructor is spelled
	// _inproc_MapHandle rather than MapHandle.
	class MapHandle {
		void **_vtable;

	public:
		_inproc_MapHandle(void **vtable) : _vtable(vtable)
		{

		}

		void unmap(Region *regions, size_t count)
		{
		}
	};

	// Variant whose methods forward to a remote implementation.
	class _remote_MapHandle : public MapHandle {
	public:
		void unmap(Region *regions, size_t count)
		{

		}
	};

	// If implemented here, user supplies:
	// ("whatever" is a placeholder body, not real code.)
	class RealMapHandle : public MapHandle {
		void unmap(Region *regions, size_t count) {
			whatever
		}
	};
+
+ // This goes in a footer included in a non-header after the user's
+ // header, or perhaps in an idlc-generated .cc file (which would
+ // then need to know where the user's headers are located).
	// Emits naked assembly entry points that marshal arguments and
	// tail-jump into the user's method implementation.
	// NOTE(review): "&RealMapHandle::MapHandle" presumably means
	// &RealMapHandle::unmap -- confirm before using this sketch.
	void _entries_RealMapHandle() {
		// -Wno-pmf-conversions please...
		typedef void (*unmap)(RealMapHandle *, Region *, size_t);

		// x86:
		asm volatile(".global _entry_RealMapHandle\n"
			"_entry_RealMapHandle:"
			"pushl %%eax;"
			"pushl %%ecx;"
			"jmpl %0;" : : "i" ((unmap)&RealMapHandle::MapHandle));

		// alpha:
		// Need to make sure that the branch is within range, or
		// else use jmp.
		asm volatile(".global _entry_RealMapHandle\n"
			"_entry_RealMapHandle:"
			"ldq $a1, 0(%sp);"
			"ldq $a2, 8(%sp);"
			"br %0;" : : "i" ((unmap)&RealMapHandle::MapHandle));
	}
+
+ // FIXME: probably use sections here instead of a global
+ // constructor. This way, initialization can be ordered,
+ // and it'd be easier to extract a list of implemented
+ // classes from the binary.
+
	// Static-constructor based registration sketch.  NOTE(review):
	// "class _init_RealMapHandle()" has stray parentheses, and the
	// second RealMapHandle below contains an unterminated asm string
	// -- uncompiled notes.
	class _init_RealMapHandle() {
	public:
		_init_RealMapHandle()
		{
			// register
		}

		~_init_RealMapHandle()
		{
			// deregister -- or don't bother because it's
			// the end of the program and it'll be unregistered
			// automatically anyway?
		}
	};

	// Global instance whose constructor performs the registration.
	_init_RealMapHandle _initinstance_RealMapHandle;

	// If not implemented here...

	class RealMapHandle : public MapHandle {
		void unmap(Region *regions, size_t count) {
			asm volatile("
		}
	};
+
+
+
+
	// NOTE(review): sketch -- "= 0" pure specifiers without "virtual".
	class Mappable {
	public:
		uint64_t get_size() = 0;
		void get_size(uint64_t &size) = 0;

		uint64_t get_block_size() = 0;
		void get_block_size(uint64_t &block_size) = 0;


	};
+}}
--- /dev/null
# Build the IDL-generated C++ headers for the System interfaces.
# COMP/BUILDTYPE/NODEPS are consumed by ../Makefile.head, which is
# expected to define BUILDDIR, IFACES, IDLC, RMDIR and ARCH.
TOP := $(shell dirname `pwd -P`)
COMP := include
BUILDTYPE := build
NODEPS := y
include ../Makefile.head
LANGS := c++

# One output directory per target language, e.g. $(BUILDDIR)/generated/c++.
TARGETS := $(LANGS:%=$(BUILDDIR)/generated/%)

# Regenerate from scratch: remove stale output, then run idlc.  On
# failure the partial output directory is removed so a later run does
# not mistake it for a complete build.
$(TARGETS): $(IFACES)
	@echo $(COMP): System IDL-generated include files
	@$(RMDIR) $@
	@if ! $(IDLC) -o $@ -l `basename $@` -s System -i $(IFACES) -t $(ARCH); then \
		$(RMDIR) "$@"; \
		false; \
	fi

include ../Makefile.tail
--- /dev/null
// Path helper for headers shared between the kernel and userspace:
// _KERNEL_SERVER(foo/bar.h) expands to a bracketed include path under
// servers/lib/kernel/ when building the kernel proper, and under
// servers/ otherwise, so the same #include line works in both trees.
#ifndef _KERNEL_KERNEL_H
#define _KERNEL_KERNEL_H

#ifdef _KERNEL
#define _KERNEL_SERVER2() servers/lib/kernel
#else
#define _KERNEL_SERVER2() servers
#endif

// Two-level expansion so SERVER is macro-expanded before being
// spliced into the <...> path.
#define _KERNEL_SERVER(SERVER) <_KERNEL_SERVER2()/SERVER>

#endif
--- /dev/null
+#ifndef _KERNEL_REGION_H
+#define _KERNEL_REGION_H
+
+#include <System/Mem.h>
+
+// Overlapping regions are not well ordered.
+
+static inline bool operator <(::System::Mem::Region &left,
+ ::System::Mem::Region &right)
+{
+ return left.start < right.start;
+}
+
+static inline bool operator <(::System::Mem::Region &left, uint64_t right)
+{
+ return left.end < right;
+}
+
+static inline bool operator >(::System::Mem::Region &left, uint64_t right)
+{
+ return left.start > right;
+}
+
+namespace Mem {
+ static inline ::System::Mem::RegionWithOffset
+ add_offset(::System::Mem::Region ®ion)
+ {
+ ::System::Mem::RegionWithOffset ret;
+ ret.start = region.start;
+ ret.end = region.end;
+ ret.offset = 0;
+ return ret;
+ }
+
+ static inline ::System::Mem::Region
+ remove_offset(::System::Mem::RegionWithOffset ®ion)
+ {
+ ::System::Mem::Region ret;
+ ret.start = region.start;
+ ret.end = region.end;
+ return ret;
+ }
+}
+
+#endif
--- /dev/null
+#ifndef _KERNEL_TIME_H
+#define _KERNEL_TIME_H
+
+#include <kernel/kernel.h>
+#include <System/Time.h>
+#include <util/heap.h>
+#include <util/spinlock.h>
+
namespace Time {
	// Renormalize after an addition.  Performs at most ONE carry, so
	// callers must not have added more than 999999999 nanoseconds.
	static inline void fix_time_add(System::Time::Time &time)
	{
		if (time.nanos >= 1000000000) {
			time.nanos -= 1000000000;
			time.seconds++;
		}
	}

	// Renormalize after a subtraction: underflow of nanos is detected
	// by reinterpreting it as signed.  NOTE(review): assumes nanos is
	// a 32-bit unsigned field -- confirm against System/Time.h.
	static inline void fix_time_sub(System::Time::Time &time)
	{
		if ((int32_t)time.nanos < 0) {
			time.nanos += 1000000000;
			time.seconds--;
		}
	}
}
+
// Comparison operators for System::Time::Time.
// NOTE(review): operator < compares seconds via signed subtraction
// ("left.seconds - right.seconds < 0") rather than "<" -- presumably
// deliberate wraparound-tolerant ordering; confirm before changing.
static inline long operator < (const System::Time::Time &left,
                               const System::Time::Time &right)
{
	return left.seconds - right.seconds < 0 ||
	       (left.seconds == right.seconds &&
	        left.nanos < right.nanos);
}

static inline long operator == (const System::Time::Time &left,
                                const System::Time::Time &right)
{
	return left.seconds == right.seconds &&
	       left.nanos == right.nanos;
}

static inline long operator != (const System::Time::Time &left,
                                const System::Time::Time &right)
{
	return left.seconds != right.seconds ||
	       left.nanos != right.nanos;
}
+
// Component-wise addition; at most one carry out of nanos.
static inline System::Time::Time operator +(const System::Time::Time &left,
                                            const System::Time::Time &right)
{
	System::Time::Time ret;

	ret.seconds = left.seconds + right.seconds;
	ret.nanos = left.nanos + right.nanos;

	Time::fix_time_add(ret);
	return ret;
}

static inline System::Time::Time &operator +=(System::Time::Time &left,
                                              const System::Time::Time &right)
{
	left.seconds += right.seconds;
	left.nanos += right.nanos;

	Time::fix_time_add(left);
	return left;
}

// Component-wise subtraction; one borrow handled by fix_time_sub.
static inline System::Time::Time operator -(const System::Time::Time &left,
                                            const System::Time::Time &right)
{
	System::Time::Time ret;

	ret.seconds = left.seconds - right.seconds;
	ret.nanos = left.nanos - right.nanos;

	Time::fix_time_sub(ret);
	return ret;
}

static inline System::Time::Time &operator -=(System::Time::Time &left,
                                              const System::Time::Time &right)
{
	left.seconds -= right.seconds;
	left.nanos -= right.nanos;

	Time::fix_time_sub(left);
	return left;
}

// Add a raw nanosecond count.  NOTE(review): fix_time_add performs at
// most one carry, so right must be < 1000000000 -- confirm callers.
static inline System::Time::Time operator +(const System::Time::Time &left,
                                            uint32_t right)
{
	System::Time::Time ret;

	ret.seconds = left.seconds;
	ret.nanos = left.nanos + right;

	Time::fix_time_add(ret);
	return ret;
}

static inline System::Time::Time &operator +=(System::Time::Time &left,
                                              uint32_t right)
{
	left.nanos += right;

	Time::fix_time_add(left);
	return left;
}

// Subtract a raw nanosecond count (right must be < 1000000000).
static inline System::Time::Time operator -(const System::Time::Time &left,
                                            uint32_t right)
{
	System::Time::Time ret;

	ret.seconds = left.seconds;
	ret.nanos = left.nanos - right;

	Time::fix_time_sub(ret);
	return ret;
}

static inline System::Time::Time &operator -=(System::Time::Time &left,
                                              uint32_t right)
{
	left.nanos -= right;

	Time::fix_time_sub(left);
	return left;
}
+
+namespace Time {
+ typedef System::Time::Clock Clock;
+ typedef System::Time::Time Time;
+ typedef System::Time::ITime ITime;
+ typedef System::Time::Timer Timer;
+ typedef System::Time::ITimer ITimer;
+
+ using namespace Lock;
+ struct TimerEntry;
+
	// Multiplexes many TimerEntries onto one underlying parent timer,
	// using a min-heap ordered by expiry time.  The spinlock guards
	// the heap against concurrent arm/disarm/run.
	class TimerMux {
		Util::Heap<TimerEntry> heap;
		SpinLock lock;
		Clock clock;
		Timer parent;

	public:
		TimerMux(Clock CLOCK, Timer PARENT) :
			clock(CLOCK), parent(PARENT)
		{
		}

		void arm(TimerEntry *entry, Time expiry);
		void disarm(TimerEntry *entry);
		// Fire all entries whose expiry has passed; reprogram parent.
		void run();
	};
+
	// A single armed-or-idle timer queued on a TimerMux.  Interface
	// glue is pulled in via _KERNEL_SERVER so the same definition
	// serves kernel and userspace builds.
	struct TimerEntry {
		Util::Heap<TimerEntry>::Node heap_node;
		Time expiry;
		TimerMux *mux;

		#include _KERNEL_SERVER(time/timer/Time/TimerEntry.h)

		TimerEntry(TimerMux *MUX = NULL) : mux(MUX)
		{
			// init_iface() is supplied by the included header above.
			init_iface();

			expiry.seconds = 0;
			expiry.nanos = 0;
		}

		virtual ~TimerEntry()
		{
		}

		// Schedule this entry on its mux at the given absolute time.
		void arm(Time expiry)
		{
			mux->arm(this, expiry);
		}

		void disarm()
		{
			mux->disarm(this);
		}

		// Report the pending expiry; *was_armed says whether *ret was set.
		void get_expiry(Time *ret, uint8_t *was_armed)
		{
			if (!ret || !was_armed) {
				// FIXME: throw exception
				return;
			}

			if (is_armed()) {
				*ret = expiry;
				*was_armed = 1;
			} else {
				*was_armed = 0;
			}
		}

		// Heap ordering: earlier expiry first.
		long operator < (TimerEntry &other)
		{
			return expiry < other.expiry;
		}

		// Armed exactly when the node is linked into a mux's heap.
		bool is_armed()
		{
			return heap_node.is_on_heap();
		}

		void set_action(System::Events::Event *event);

		// This can be overridden in low-level code (specifically
		// the kernel) to execute the action synchronously,
		// inside the timer critical section.  Most code should
		// (and non-C++ code must) use set_action instead.

		virtual void action();
	};
+}
+
+#endif
--- /dev/null
+#ifndef _CPP_ORB_H
+#define _CPP_ORB_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <string.h>
+
+namespace System {
+ struct _i_Object;
+
+ namespace Mem {
+ struct Region;
+ }
+
+ namespace RunTime {
		// ORB memory manager: allocation and region reference
		// counting for buffers whose lifetime the ORB controls.
		// Only the interface is declared here; the implementation
		// lives elsewhere.
		class ORBMM {
			void *priv;

		public:
			// Opaque tag; allocations sharing a group can be
			// managed together.
			class AllocGroup {
				friend class ORBMM;
			};

			typedef ::System::Mem::Region Region;

			ORBMM();

			// Allocate size bytes, optionally tagged with group.
			void *alloc(size_t size, AllocGroup *group = NULL);

			void retain(Region region);
			void release(Region region);
			void super_retain(Region region);
			void super_release(Region region);
			AllocGroup *create_group();
			void destroy_group(AllocGroup *group);
			void *add_region(Region region);
		};
+
+ extern ORBMM *orbmm;
+
		// Thrown on out-of-range array access or slicing.
		// FIXME: should be an IDL exception
		struct ArrayException
		{
		};

		// Tag type/value for constructing empty arrays of any
		// element type: f(nullarray).
		class NullArray {
		};

		static const NullArray nullarray = {};
+
+ template<typename T> struct MutableArray {
+ T *ptr;
+ size_t count;
+
+ bool valid_index(size_t index)
+ {
+ return index >= 0 && index < count;
+ }
+
+ T &operator[](size_t index)
+ {
+#ifndef POLINTOS_NO_ARRAY_BOUNDS_CHECK
+ if (!valid_index(index))
+ throw ArrayException();
+#endif
+
+ return ptr[index];
+ }
+
+ MutableArray()
+ {
+ ptr = NULL;
+ count = 0;
+ }
+
+ MutableArray(NullArray na)
+ {
+ ptr = NULL;
+ count = 0;
+ }
+
+ MutableArray(T *PTR, size_t COUNT)
+ {
+ ptr = PTR;
+ count = COUNT;
+ }
+
+ MutableArray &slice_nocheck(size_t first, size_t newcount)
+ {
+ MutableArray ret;
+ ret.ptr = ptr + first;
+ ret.count = newcount;
+ return ret;
+ }
+
+ MutableArray &slice_nocheck(size_t first)
+ {
+ MutableArray ret;
+ ret.ptr = ptr + first;
+ ret.count = count - first;
+ return ret;
+ }
+
+ MutableArray &slice(size_t first, size_t count)
+ {
+#ifndef POLINTOS_NO_ARRAY_BOUNDS_CHECK
+ if (!valid_index(first) || !valid_index(first + count - 1))
+ throw ArrayException();
+#endif
+
+ return slice_nocheck(first, count);
+ }
+
+ MutableArray &slice(size_t first)
+ {
+#ifndef POLINTOS_NO_ARRAY_BOUNDS_CHECK
+ if (!valid_index(first))
+ throw ArrayException();
+#endif
+
+ return slice_nocheck(first);
+ }
+
+ MutableArray copy()
+ {
+ MutableArray new_arr;
+ new_arr.ptr = new(orbmm) T[count];
+ new_arr.count = count;
+ memcpy(new_arr.ptr, ptr, count);
+ return new_arr;
+ }
+ };
+
+ template<typename T> struct Array {
+ const T *ptr;
+ size_t count;
+
+ bool valid_index(size_t index)
+ {
+ return index >= 0 && index < count;
+ }
+
+ const T &operator[](size_t index)
+ {
+#ifndef POLINTOS_NO_ARRAY_BOUNDS_CHECK
+ if (!valid_index(index))
+ throw ArrayException();
+#endif
+
+ return ptr[index];
+ }
+
+ Array()
+ {
+ ptr = NULL;
+ count = 0;
+ }
+
+ Array(NullArray na)
+ {
+ ptr = NULL;
+ count = 0;
+ }
+
+ Array(const T *PTR, size_t COUNT)
+ {
+ ptr = PTR;
+ count = COUNT;
+ }
+
+ Array(MutableArray<T> ma)
+ {
+ ptr = ma.ptr;
+ count = ma.count;
+ }
+
+ MutableArray<T> constcast()
+ {
+ MutableArray<T> ma;
+ ma.ptr = const_cast<T>(ptr);
+ ma.count = count;
+ return ma;
+ }
+
+ Array &slice_nocheck(size_t first, size_t newcount)
+ {
+ Array ret;
+ ret.ptr = ptr + first;
+ ret.count = newcount;
+ return ret;
+ }
+
+ Array &slice_nocheck(size_t first)
+ {
+ Array ret;
+ ret.ptr = ptr + first;
+ ret.count = count - first;
+ return ret;
+ }
+
+ Array &slice(size_t first, size_t count)
+ {
+#ifndef POLINTOS_NO_ARRAY_BOUNDS_CHECK
+ if (!valid_index(first) || !valid_index(first + count - 1))
+ throw ArrayException();
+#endif
+
+ return slice_nocheck(first, count);
+ }
+
+ Array &slice(size_t first)
+ {
+#ifndef POLINTOS_NO_ARRAY_BOUNDS_CHECK
+ if (!valid_index(first))
+ throw ArrayException();
+#endif
+
+ return slice_nocheck(first);
+ }
+
+ MutableArray<T> copy()
+ {
+ MutableArray<T> new_arr;
+ new_arr.ptr = new(orbmm) T[count];
+ new_arr.count = count;
+ memcpy(new_arr.ptr, ptr, count);
+ return new_arr;
+ }
+ };
+
+ static inline Array<uint8_t> countarray(const char *ptr)
+ {
+ Array<uint8_t> ret;
+ ret.ptr = reinterpret_cast<const uint8_t *>(ptr);
+ ret.count = strlen(ptr);
+ return ret;
+ }
+
+ static inline MutableArray<uint8_t> countarray(char *ptr)
+ {
+ MutableArray<uint8_t> ret;
+ ret.ptr = reinterpret_cast<uint8_t *>(ptr);
+ ret.count = strlen(ptr);
+ return ret;
+ }
+
		// One entry in an object's interface table: the interface's
		// GUID plus the adjustment to add to an object pointer to
		// reach that interface.
		struct IFaceTable {
			const unsigned long *const guid;
			const ptrdiff_t offset;
		};

		// Run-time type information for virtual structs.
		struct VStructInfo {
			// List of GUIDs of the struct and its superstructs,
			// starting with System.VStruct and ending with
			// the concrete struct.

			const unsigned long *const *const guids;

			// Length of inheritance chain; 1 for System.VStruct

			const int chainlen;
		};
+
+ uintptr_t downcast(::System::_i_Object *obj,
+ const unsigned long *new_guid);
+
+ static inline bool guids_equal(const unsigned long *guid1,
+ const unsigned long *guid2)
+ {
+ return (guid1[0] == guid2[0] &&
+ guid1[1] == guid2[1] &&
+ (sizeof(long) == 8 ||
+ (guid1[2] == guid2[2] &&
+ guid1[3] == guid2[3])));
+ }
+
+ // Return the caller's PC. It'd be nice if GCC had a builtin for
+ // the current PC, so that a simple relocation could be used rather
+ // than a function call. OPT: put this in lowlevel-lib, so that
+ // architectures can provide a faster version.
+
+ unsigned long get_pc();
+
		// Wire-format descriptor for a method invocation's
		// parameters, laid out by the generated stubs for the ORB.
		struct ParamInfoBlock {
			uintptr_t buffer_size;
			void **objlist_ptr;
			uintptr_t objlist_len;
			void **ptrlist_ptr;
			uintptr_t ptrlist_len;
			uintptr_t num_segments;

			// Variable-length trailer of num_segments entries;
			// segments[0] is the pre-C99 flexible-array idiom.
			struct Segment {
				void *ptr;
				uintptr_t len;
				uintptr_t flags;
			} segments[0];
		};
+ }
+}
+
// Placement-style allocation from ORB-managed memory:
// new(orbmm) T(...) allocates via the given ORBMM, optionally tagged
// with an allocation group.
inline void *operator new(size_t len, ::System::RunTime::ORBMM *orbmm,
                          ::System::RunTime::ORBMM::AllocGroup *group = NULL)
{
	return orbmm->alloc(len, group);
}
+
// Array form of the ORBMM placement allocator: new(orbmm) T[n].
inline void *operator new[](size_t len, ::System::RunTime::ORBMM *orbmm,
                            ::System::RunTime::ORBMM::AllocGroup *group = NULL)
{
	return orbmm->alloc(len, group);
}
+
+// This is a macro rather than an inline template function so that the
+// caller shows up as file/line number in the debugging information rather
+// than this header file, and so that a variable argument list can be
+// passed to a fixed arg ctor.
+//
+// To throw an IDL exception of type Foo, do this:
+// throw_idl(Foo, args, to, foo);
+
#ifndef POLINTOS_NO_THROW_IDL
// Throw IDL exception T, attaching a NativeCodeExceptionOriginInfo
// recording the caller's PC.  NOTE(review): "_KERNEL ? 1 : 0"
// requires _KERNEL to be defined to an expression -- confirm all
// users compile with it defined.
#define throw_idl(T, args...) do { \
	throw T(NULL, NULL, \
	        new(::System::RunTime::orbmm) \
	        ::System::Exceptions::NativeCodeExceptionOriginInfo \
	        (::System::RunTime::get_pc()), \
	        _KERNEL ? 1 : 0, ##args); \
} while (0)

// As throw_idl, but records a copy of the in-flight exception oldex
// as the new exception's cause.
#define rethrow_idl(oldex, T, args...) do { \
	throw T(new(::System::RunTime::orbmm) typeof(oldex)(oldex), NULL, \
	        new(::System::RunTime::orbmm) \
	        ::System::Exceptions::NativeCodeExceptionOriginInfo \
	        (::System::RunTime::get_pc()), \
	        _KERNEL ? 1 : 0, ##args); \
} while (0)
#endif
+
+#endif
--- /dev/null
+#ifndef _UTIL_ASSERT_H
+#define _UTIL_ASSERT_H
+
namespace Assert {
	// Assertion levels: a check fires only if its level is <= the
	// compile-time _UTIL_ASSERT_LEVEL (supplied by the build).
	enum {
		Always = 0,
		Normal = 1,
		Excessive = 2
	};

	static inline void __attribute__((always_inline))
	do_assert(const char *file, int line, bool condition, int level)
	{
		// Declared but never defined anywhere: if "level" is not a
		// compile-time constant, the call below survives to link
		// time and produces an undefined-reference error.
		void level_must_be_const();

		if (!__builtin_constant_p(level))
			level_must_be_const();

		// assert_failure() is supplied by the including build; the
		// level test is folded away at compile time.
		if (level <= _UTIL_ASSERT_LEVEL && __builtin_expect(!condition, 0))
			assert_failure(file, line);
	}
}
+
// assertl(cond, level): assertion active at the given level or below.
#define assertl(cond, level) (::Assert::do_assert(__FILE__, __LINE__, cond, level))
// BUG(): unconditional assertion failure marker.
#define BUG() assertl(0, ::Assert::Always)
+
+#endif
--- /dev/null
+// util/heap.h -- A heap-based priority queue implementation
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#ifndef _UTIL_HEAP_H
+#define _UTIL_HEAP_H
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <lowlevel/bitops.h>
+
+namespace Util {
+ // T must support operator < for key ordering, and
+ // have a Heap<T>::Node member called heap_node.
+
	// Intrusive binary min-heap used as a priority queue.  Nodes are
	// linked with explicit parent/child pointers (no backing array),
	// so elements never move in memory.
	template <typename T>
	class Heap {
	public:
		struct Node {
			Node *parent, *left, *right;

			// A pointer to the parent's left or right pointer
			// (whichever side this node is on).  This is also
			// used for sanity checks (it is NULL for nodes not
			// on a queue).

			Node **parentlink;

			Node()
			{
				parent = left = right = NULL;
				parentlink = NULL;
			}

			bool is_on_heap()
			{
				return parentlink != NULL;
			}
		};

	private:
		Node *top;
		size_t num_nodes;

		// Recover the containing T from its embedded heap_node.
		T *node_to_type(Node *n)
		{
			return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(n) -
			                             offsetof(T, heap_node));
		}

		// Swap n with its parent
		//
		// Exchanges all parent/child/parentlink pointers of the two
		// nodes, handling the two mirror cases (n is its parent's
		// left or right child) and re-pointing the back-links of any
		// grandchildren that moved.
		void swap(Node *n)
		{
			Node *p = n->parent;
			*p->parentlink = n;

			n->parent = p->parent;
			p->parent = n;

			Node **plink = p->parentlink;

			if (n->parentlink == &p->left) {
				assert(p->left == n);

				Node *tmp = p->right;
				p->right = n->right;
				n->right = tmp;

				p->left = n->left;
				n->left = p;

				p->parentlink = &n->left;

				if (n->right) {
					n->right->parent = n;
					n->right->parentlink = &n->right;
				}
			} else {
				assert(n->parentlink == &p->right);
				assert(p->right == n);

				Node *tmp = p->left;
				p->left = n->left;
				n->left = tmp;

				p->right = n->right;
				n->right = p;

				p->parentlink = &n->right;

				if (n->left) {
					n->left->parent = n;
					n->left->parentlink = &n->left;
				}
			}

			if (p->left) {
				p->left->parent = p;
				p->left->parentlink = &p->left;

				if (p->right) {
					p->right->parent = p;
					p->right->parentlink = &p->right;
				}
			} else {
				assert(!p->right);
			}

			n->parentlink = plink;
		}

		// Restore heap condition by moving n towards the root,
		// if required.

		void up(Node *n)
		{
			T *n_type = node_to_type(n);

			assert(!n->left || !(*node_to_type(n->left) < *n_type));
			assert(!n->right || !(*node_to_type(n->right) < *n_type));

			while (n->parent && *n_type < *node_to_type(n->parent)) {
				assert(!n->left || !(*node_to_type(n->left) < *n_type));
				assert(!n->right || !(*node_to_type(n->right) < *n_type));

				swap(n);
			}

			assert(!n->left || !(*node_to_type(n->left) < *n_type));
			assert(!n->right || !(*node_to_type(n->right) < *n_type));
		}

		// Restore heap condition by moving n towards the leaves,
		// if required.

		void down(Node *n)
		{
			T *n_type = node_to_type(n);

			assert(!n->parent || !(*n_type < *node_to_type(n->parent)));

			// If n has a right, it'll have a left.
			while (n->left) {
				assert(!n->parent || !(*n_type < *node_to_type(n->parent)));
				// Swap with the smaller of the two children.
				Node *child = n->left;

				if (n->right &&
				    *node_to_type(n->right) < *node_to_type(n->left))
					child = n->right;

				if (*node_to_type(child) < *n_type)
					swap(child);
				else
					break;
			}

			assert(!n->parent || !(*n_type < *node_to_type(n->parent)));
		}

		// Locate the node at breadth-first position num (0-based),
		// also returning its parent and the parent's child link that
		// points (or, for num == num_nodes, would point) at it.
		Node *get_by_index(size_t num, Node **parent, Node ***link)
		{
			assert(num_nodes == 0 || top);
			assert(num <= num_nodes);

			// It's easier to use 1-based indices, so that the high set bit
			// perfectly indicates which level the node is on.

			num += 1;

			Node *n = NULL;
			size_t bit = 1 << ll_get_order_round_down(num);
			*parent = NULL;
			Node **nextlink = &top;

			// Each bit below the top one selects left (0) or
			// right (1) on the path down from the root.
			do {
				*link = nextlink;
				*parent = n;
				n = *nextlink;

				bit /= 2;

				if (num & bit)
					nextlink = &n->right;
				else
					nextlink = &n->left;
			} while (bit);

			return n;
		}

	public:
		Heap()
		{
			top = NULL;
			num_nodes = 0;
		}

		bool empty()
		{
			return num_nodes == 0;
		}

		// Minimum element, or NULL if the heap is empty.
		T *get_top()
		{
			if (!top)
				return NULL;

			return node_to_type(top);
		}

		// Insert t at the next free position, then sift it up.
		void add(T *t)
		{
			Node *n = &t->heap_node;
			assert(!n->is_on_heap());

			Node *ret = get_by_index(num_nodes, &n->parent, &n->parentlink);

			assert(ret == NULL);
			assert(num_nodes == 0 || n->parent);

			num_nodes++;
			*n->parentlink = n;
			n->left = n->right = NULL;

			up(n);
		}

		// Remove t: replace it with the last node in breadth-first
		// order, then sift that node down.
		void del(T *t)
		{
			Node *n = &t->heap_node;
			assert(*n->parentlink == n);

			if (--num_nodes == 0) {
				top = NULL;
				n->parentlink = NULL;
				return;
			}

			Node *parent, **link;
			Node *last = get_by_index(num_nodes, &parent, &link);

			assert(last->parent);
			assert(last->is_on_heap());
			assert(!last->left);
			assert(!last->right);

			*link = NULL;
			*n->parentlink = last;
			*last = *n;
			n->parentlink = NULL;

			if (last->left) {
				last->left->parent = last;
				last->left->parentlink = &last->left;
			}

			if (last->right) {
				last->right->parent = last;
				last->right->parentlink = &last->right;
			}

			down(last);
		}

		// Re-establish the heap property after t's key has been
		// changed in place.
		void requeue(T *t) {
			Node *n = &t->heap_node;
			assert(*n->parentlink == n);

			if (!n->parent || *node_to_type(n->parent) < *t)
				down(n);
			else
				up(n);
		}
	};
+}
+
+#endif
--- /dev/null
+#ifndef _UTIL_LIST_H
+#define _UTIL_LIST_H
+
+#include <assert.h>
+
+namespace Util
+{
+ // Linked lists with nodes embedded in the data.
+ //
+ // Duplicating for auto-init is ugly, but at least it's implementation
+ // ugliness; inheriting would cause interface ugliness, with users
+ // required to either declare ListAutoInit for the vastly common case,
+ // or else use ListNoAutoInit pointers when following prev or next.
+ //
+ // Maybe some sort of templatization could be done.
+
	// Intrusive circular doubly-linked list node; prev/next point at
	// the node itself when the list is empty.  This variant does NOT
	// initialize itself -- call init() before first use.
	struct ListNoAutoInit {
		ListNoAutoInit *prev, *next;

		// Make this an empty (self-linked) list.
		void init()
		{
			prev = next = this;
		}

		bool empty()
		{
			return next == this;
		}

		// Unlink from whatever list this node is on; the node is
		// left re-initialized (empty).
		void del()
		{
			prev->next = next;
			next->prev = prev;
			init();
		}

		// Insert newelem immediately after this node.
		void add_front(ListNoAutoInit *newelem)
		{
			assert(newelem->empty());

			newelem->prev = this;
			next->prev = newelem;
			newelem->next = next;
			next = newelem;
		}

		// Insert newelem immediately before this node.
		void add_back(ListNoAutoInit *newelem)
		{
			assert(newelem->empty());

			newelem->next = this;
			prev->next = newelem;
			newelem->prev = prev;
			prev = newelem;
		}

		// Recover the containing object from this embedded node;
		// use via the listentry() macro below.
		template<typename T, int offset>
		T *entry()
		{
			return reinterpret_cast<T *>(reinterpret_cast<ulong>(this) - offset);
		}
	};
+
+ struct List {
+ List *prev, *next;
+
+ List()
+ {
+ init();
+ }
+
+ void init()
+ {
+ prev = next = this;
+ }
+
+ bool empty()
+ {
+ return next == this;
+ }
+
+ void del()
+ {
+ prev->next = next;
+ next->prev = prev;
+ init();
+ }
+
+ void add_front(List *newelem)
+ {
+ assert(newelem->empty());
+
+ newelem->prev = this;
+ next->prev = newelem;
+ newelem->next = next;
+ next = newelem;
+ }
+
+ void add_back(List *newelem)
+ {
+ assert(newelem->empty());
+
+ newelem->next = this;
+ prev->next = newelem;
+ newelem->prev = prev;
+ prev = newelem;
+ }
+
+ template<typename T, int offset>
+ T *entry()
+ {
+ return reinterpret_cast<T *>(reinterpret_cast<ulong>(this) - offset);
+ }
+ };
+
+ // Ick. We can't pass in "member" as a template parameter, and
+ // it'd be annoying to have to manually use offsetof all over
+ // the place. Use this as if it were a member function, but
+ // be careful of the global name.
+
+ #define listentry(T, member) entry<T, offsetof(T, member)>()
+}
+
+#endif
--- /dev/null
+#ifndef _UTIL_LOCK_H
+#define _UTIL_LOCK_H
+
+// There are two basic lock types: Lock and SpinLock. A thread that
+// blocks on a Lock may sleep until it obtains the lock. In kernel code,
+// a thread that blocks on a SpinLock will not sleep, but will run a busy
+// loop until the lock is freed. In userspace code, a SpinLock behaves
+// as an ordinary Lock.
+
+#ifdef _KERNEL
+#include <kern/lock.h>
+#endif
+
+#include <util/spinlock.h>
+
+namespace Lock {
+ // RAII guard: acquires the (sleeping) Lock on construction and
+ // releases it when the guard leaves scope.
+ struct AutoLock {
+ Lock &real_lock;
+
+ AutoLock(Lock &lock) : real_lock(lock)
+ {
+ real_lock.lock();
+ }
+
+ ~AutoLock()
+ {
+ real_lock.unlock();
+ }
+ };
+
+	// Scoped Lock guard that can be released (and re-acquired) before
+	// the scope ends; the destructor unlocks only if currently held.
+	struct DroppableAutoLock {
+		Lock &real_lock;
+		bool dropped;
+
+		DroppableAutoLock(Lock &lock) : real_lock(lock)
+		{
+			real_lock.lock();
+			dropped = false;
+		}
+
+		~DroppableAutoLock()
+		{
+			if (dropped)
+				return;
+
+			real_lock.unlock();
+		}
+
+		// Re-acquire after an explicit unlock().
+		void lock()
+		{
+			assert(dropped);
+			real_lock.lock();
+			dropped = false;
+		}
+
+		// Release early; the destructor then becomes a no-op.
+		void unlock()
+		{
+			assert(!dropped);
+			dropped = true;
+			real_lock.unlock();
+		}
+	};
+
+ // Acquires two locks in a canonical order (by address) so that any
+ // two code paths locking the same pair cannot deadlock against each
+ // other; both are released on scope exit.
+ // NOTE(review): passing the same Lock for both arguments would
+ // self-deadlock -- confirm callers never do that.
+ struct DoubleAutoLock {
+ Lock &real_lock1, &real_lock2;
+
+ DoubleAutoLock(Lock &lock1, Lock &lock2) :
+ real_lock1(lock1), real_lock2(lock2)
+ {
+ // Lower-addressed lock first, regardless of argument order.
+ if (&lock1 < &lock2) {
+ real_lock1.lock();
+ real_lock2.lock();
+ } else {
+ real_lock2.lock();
+ real_lock1.lock();
+ }
+ }
+
+ ~DoubleAutoLock()
+ {
+ // Unlock order does not matter for deadlock avoidance.
+ real_lock1.unlock();
+ real_lock2.unlock();
+ }
+ };
+}
+
+#endif
--- /dev/null
+#ifndef _UTIL_MISC_H
+#define _UTIL_MISC_H
+
+namespace Util {
+ // Round the given value up or down to the specified power-of-two.
+
+	// Round val up to the next multiple of 2^shift.
+	// Uses T(1) so the shifted mask is computed at the width of T;
+	// the previous 1UL truncated the mask whenever T is wider than
+	// unsigned long (e.g. uint64_t on a 32-bit target).
+	template<typename T>
+	static inline T round_up(T val, int shift)
+	{
+		return (val + (T(1) << shift) - 1) & ~((T(1) << shift) - 1);
+	}
+
+	// Round val down to a multiple of 2^shift.
+	// T(1) (not 1UL) keeps the mask at T's width when T is wider than
+	// unsigned long (e.g. uint64_t on a 32-bit target).
+	template<typename T>
+	static inline T round_down(T val, int shift)
+	{
+		return val & ~((T(1) << shift) - 1);
+	}
+}
+
+#endif
--- /dev/null
+// util/rbtree.h -- A red/black tree implementation
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#ifndef _UTIL_RBTREE_H
+#define _UTIL_RBTREE_H
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+
+namespace Util {
+ // T must have an RBTree<T, NodeV, KeyV>::Node member called rbtree_node.
+ // NodeV < NodeV, NodeV < KeyV, and NodeV > KeyV must be supported.
+ //
+ // Using NodeV != KeyV allows things such as having NodeV represent a
+ // range of values and KeyV a single value, causing find() to search
+ // for the node in whose range KeyV falls. It is an error to add
+ // nodes that are not well ordered.
+
+	template <typename T, typename NodeV, typename KeyV>
+	class RBTree {
+	public:
+		// Embedded node; T must contain one of these named rbtree_node.
+		struct Node {
+			Node *parent, *left, *right;
+			NodeV value;
+			bool red;
+
+			// A pointer to the parent's left or right pointer
+			// (whichever side this node is on). This is also
+			// used for sanity checks (it is NULL for nodes not
+			// on a tree).
+			Node **parentlink;
+
+			Node()
+			{
+				parent = left = right = NULL;
+				parentlink = NULL;
+			}
+
+			bool is_on_rbtree()
+			{
+				return parentlink != NULL;
+			}
+		};
+
+	private:
+		Node *top;
+
+		// Recover the containing T from its embedded rbtree_node.
+		T *node_to_type(Node *n)
+		{
+			return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(n) -
+			                             offsetof(T, rbtree_node));
+		}
+
+		// Check ordering and the no-two-adjacent-reds invariant for a
+		// parent/child pair.
+		void assert_rb_condition(Node *left, Node *right)
+		{
+			assert(left->value < right->value);
+			assert(!(right->value < left->value));
+			assert(!left->red || !right->red);
+		}
+
+		// Check ordering between a node and a bare NodeV value. No
+		// color check is possible here: "right" is a value, not a
+		// node. (The previous version dereferenced right->red, a
+		// copy-and-paste error that cannot compile for any
+		// non-pointer NodeV if this template is ever instantiated.)
+		void assert_well_ordered(Node *left, NodeV right)
+		{
+			assert(left->value < right);
+			assert(!(left->value > right));
+		}
+
+		Node *find_node(Node *n, NodeV &val, Node ***link);
+		Node *find_node_key(KeyV val, bool exact = true);
+
+		// True if n hangs off its parent's left pointer.
+		bool node_is_left(Node *n)
+		{
+			assert(n->parentlink == &n->parent->left ||
+			       n->parentlink == &n->parent->right);
+
+			return n->parentlink == &n->parent->left;
+		}
+
+		Node *sibling(Node *n)
+		{
+			if (node_is_left(n))
+				return n->parent->right;
+
+			return n->parent->left;
+		}
+
+		Node *uncle(Node *n)
+		{
+			return sibling(n->parent);
+		}
+
+		void rotate_left(Node *n);
+		void rotate_right(Node *n);
+
+		// B must have no right child and have A as an ancestor.
+		void swap(Node *a, Node *b);
+
+		// Color test that tolerates NULL (NULL counts as black).
+		bool red(Node *n)
+		{
+			return n && n->red;
+		}
+
+	public:
+		RBTree()
+		{
+			top = NULL;
+		}
+
+		bool empty()
+		{
+			return top == NULL;
+		}
+
+		// Exact lookup; returns NULL if no node matches value.
+		T *find(KeyV value)
+		{
+			Node *n = find_node_key(value);
+
+			if (n)
+				return node_to_type(n);
+
+			return NULL;
+		}
+
+		// Inexact lookup: returns the last node on the search path
+		// when there is no exact match.
+		T *find_nearest(KeyV value)
+		{
+			Node *n = find_node_key(value, false);
+
+			if (n)
+				return node_to_type(n);
+
+			// Should only happen on an empty tree
+			return NULL;
+		}
+
+		void add(T *t);
+		void del(T *t);
+	};
+
+	// Descend from n looking for val. If link is non-NULL, *link is
+	// set to the address of the pointer that refers to the returned
+	// node (&top for the root, or the would-be child slot of a leaf)
+	// so callers can splice in a new node. Returns NULL only for an
+	// empty tree; asserts if an exact duplicate of val is seen.
+	template <typename T, typename NodeV, typename KeyV>
+	typename RBTree<T, NodeV, KeyV>::Node *
+	RBTree<T, NodeV, KeyV>::find_node(Node *n, NodeV &val, Node ***link)
+	{
+		if (link) {
+			if (n == top)
+				*link = &top;	// was mojibake "⊤" (HTML-escaped "&top;")
+			else
+				*link = n->parentlink;
+		}
+
+		if (!top)
+			return NULL;
+
+		while (true) {
+			assert(n);
+			Node *next;
+
+			if (n->value < val) {
+				next = n->right;
+
+				if (next) {
+					assert_rb_condition(n, next);
+				} else {
+					if (link)
+						*link = &n->right;
+
+					break;
+				}
+			} else {
+				// This assert detects duplicate nodes, but not
+				// overlapping ranges (or otherwise non-well-ordered
+				// values).
+				assert(val < n->value);
+				next = n->left;
+
+				if (next) {
+					assert_rb_condition(next, n);
+				} else {
+					if (link)
+						*link = &n->left;
+
+					break;
+				}
+			}
+
+			n = next;
+		}
+
+		return n;
+	}
+
+ // Search the tree by key. With exact == true, return the node whose
+ // value matches val, or NULL if none. With exact == false, return
+ // the last node on the search path instead of NULL when there is no
+ // exact match (NULL is then possible only for an empty tree).
+ template <typename T, typename NodeV, typename KeyV>
+ typename RBTree<T, NodeV, KeyV>::Node *
+ RBTree<T, NodeV, KeyV>::find_node_key(KeyV val, bool exact)
+ {
+ Node *n = top;
+
+ while (n) {
+ Node *next;
+
+ if (n->value < val) {
+ next = n->right;
+
+ if (next)
+ assert_rb_condition(n, next);
+ } else if (n->value > val) {
+ next = n->left;
+
+ if (next)
+ assert_rb_condition(next, n);
+ } else {
+ // Neither < nor >: this node matches val.
+ break;
+ }
+
+ // Ran out of tree: hand back the nearest node if allowed.
+ if (!next && !exact)
+ return n;
+
+ n = next;
+ }
+
+ return n;
+ }
+
+ // Left rotation about n: n's right child takes n's place, n becomes
+ // that child's left child, and the child's old left subtree becomes
+ // n's new right subtree. parent/parentlink fields are kept
+ // consistent throughout; n must have a right child. Colors are not
+ // touched here.
+ template <typename T, typename NodeV, typename KeyV>
+ void RBTree<T, NodeV, KeyV>::rotate_left(Node *n)
+ {
+ Node *new_top = n->right;
+
+ // Point whatever referenced n at its replacement.
+ assert(*n->parentlink == n);
+ *n->parentlink = new_top;
+
+ assert(new_top->parent == n);
+ assert(new_top->parentlink == &n->right);
+
+ new_top->parent = n->parent;
+ new_top->parentlink = n->parentlink;
+
+ n->parent = new_top;
+ n->parentlink = &new_top->left;
+
+ // Transfer new_top's old left subtree to n's right slot.
+ n->right = new_top->left;
+ new_top->left = n;
+
+ if (n->right) {
+ assert(n->right->parent == new_top);
+ assert(n->right->parentlink == &new_top->left);
+
+ n->right->parent = n;
+ n->right->parentlink = &n->right;
+ }
+ }
+
+ // Right rotation about n: mirror image of rotate_left(). n's left
+ // child takes n's place, n becomes its right child, and the child's
+ // old right subtree becomes n's new left subtree. n must have a
+ // left child; colors are not touched.
+ template <typename T, typename NodeV, typename KeyV>
+ void RBTree<T, NodeV, KeyV>::rotate_right(Node *n)
+ {
+ Node *new_top = n->left;
+
+ // Point whatever referenced n at its replacement.
+ assert(*n->parentlink == n);
+ *n->parentlink = new_top;
+
+ assert(new_top->parent == n);
+ assert(new_top->parentlink == &n->left);
+
+ new_top->parent = n->parent;
+ new_top->parentlink = n->parentlink;
+
+ n->parent = new_top;
+ n->parentlink = &new_top->right;
+
+ // Transfer new_top's old right subtree to n's left slot.
+ n->left = new_top->right;
+ new_top->right = n;
+
+ if (n->left) {
+ assert(n->left->parent == new_top);
+ assert(n->left->parentlink == &new_top->right);
+
+ n->left->parent = n;
+ n->left->parentlink = &n->left;
+ }
+ }
+
+ // B must have no right child and have A as an ancestor.
+ // Exchanges the tree positions and colors of a and b; del() uses
+ // this to move a two-child node into its in-subtree predecessor's
+ // slot. The bparent == a case handles b being a's direct child.
+ template <typename T, typename NodeV, typename KeyV>
+ void RBTree<T, NodeV, KeyV>::swap(Node *a, Node *b)
+ {
+ Node *bparent = b->parent;
+ Node **bparentlink = b->parentlink;
+
+ assert(!b->right);
+ assert(a->left || a->right);
+
+ b->parent = a->parent;
+ b->parentlink = a->parentlink;
+
+ if (bparent == a) {
+ // b was a's direct (left) child; a now hangs off b->left.
+ a->parent = b;
+ a->parentlink = &b->left;
+ } else {
+ a->parent = bparent;
+ a->parentlink = bparentlink;
+ }
+
+ assert(a->parent != a);
+ assert(b->parent != b);
+
+ // Exchange child pointers (b had no right child).
+ Node *bleft = b->left;
+ b->left = a->left;
+ a->left = bleft;
+
+ b->right = a->right;
+ a->right = NULL;
+
+ *a->parentlink = a;
+ *b->parentlink = b;
+
+ assert(a->parent != a);
+ assert(b->parent != b);
+
+ // Colors travel with the position, not with the node.
+ bool bred = b->red;
+ b->red = a->red;
+ a->red = bred;
+
+ // Re-point the children's back-links at their new parents.
+ if (a->left) {
+ a->left->parent = a;
+ a->left->parentlink = &a->left;
+ }
+
+ if (b->right) {
+ b->right->parent = b;
+ b->right->parentlink = &b->right;
+ }
+
+ assert(b->left);
+ b->left->parent = b;
+ b->left->parentlink = &b->left;
+ }
+
+ // Insert t at the leaf position located by find_node(), then
+ // restore the red/black invariants bottom-up: recolor while the
+ // uncle is red, otherwise rotate (the "repeat" label iterates in
+ // place of recursion on the grandparent).
+ template <typename T, typename NodeV, typename KeyV>
+ void RBTree<T, NodeV, KeyV>::add(T *t)
+ {
+ Node *n = &t->rbtree_node;
+ assert(!n->is_on_rbtree());
+
+ // Locate the attachment point; parentlink receives the link to patch.
+ Node *insert_at = find_node(top, n->value, &n->parentlink);
+
+ assert(insert_at || !top);
+ *n->parentlink = n;
+ n->parent = insert_at;
+ n->left = n->right = NULL;
+
+ // Fixup loop: n is the newly linked node, or the grandparent
+ // promoted by a recoloring step below.
+ repeat:
+ assert(n->parentlink);
+ assert(*n->parentlink == n);
+
+ if (!n->parent) {
+ // Reached the root; the root is always black.
+ n->red = false;
+ return;
+ }
+
+ // Exactly one ordering must hold ("<" binds tighter than "!=").
+ assert(n->parent->value < n->value != n->value < n->parent->value);
+ n->red = true;
+
+ if (!n->parent->red)
+ return;
+
+ // Red parent + red uncle: blacken both, then treat the
+ // grandparent as the new node (it is reddened at the loop top).
+ Node *unc = uncle(n);
+ if (red(unc)) {
+ assert(!n->parent->parent->red);
+ n->parent->red = unc->red = false;
+ n = n->parent->parent;
+ goto repeat;
+ }
+
+ // Inner child: rotate to reduce to the outer-child case.
+ if (node_is_left(n)) {
+ if (!node_is_left(n->parent)) {
+ rotate_right(n->parent);
+ n = n->right;
+ }
+ } else {
+ if (node_is_left(n->parent)) {
+ rotate_left(n->parent);
+ n = n->left;
+ }
+ }
+
+ // Outer child, red parent, black uncle: recolor and rotate the
+ // grandparent to finish.
+ assert(n->parent->red);
+ assert(!red(uncle(n)));
+ assert(!n->parent->parent->red);
+
+ n->parent->red = false;
+ n->parent->parent->red = true;
+
+ if (node_is_left(n)) {
+ assert(node_is_left(n->parent));
+ rotate_right(n->parent->parent);
+ } else {
+ assert(!node_is_left(n->parent));
+ rotate_left(n->parent->parent);
+ }
+ }
+
+ // Remove t from the tree: reduce the two-child case by swapping
+ // with the in-order predecessor (which has no right child), splice
+ // the node out, then restore the black-height invariant if a black
+ // node was removed (standard red/black delete fixup).
+ template <typename T, typename NodeV, typename KeyV>
+ void RBTree<T, NodeV, KeyV>::del(T *t)
+ {
+ Node *n = &t->rbtree_node;
+ assert(*n->parentlink == n);
+
+ if (n->left && n->right) {
+ // Two children: swap with the rightmost node of the left
+ // subtree so n ends up with at most one (left) child.
+ Node *highest_on_left = find_node(n->left, n->value, NULL);
+ assert(!highest_on_left->right);
+ swap(n, highest_on_left);
+ assert(!n->right);
+ }
+
+ Node *parent = n->parent;
+ Node *child = n->left ? n->left : n->right;
+
+ // Splice n out, promoting its only child (if any).
+ if (child) {
+ assert(child->parent == n);
+
+ child->parent = n->parent;
+ child->parentlink = n->parentlink;
+ *child->parentlink = child;
+ assert(child != parent);
+ } else {
+ *n->parentlink = NULL;
+ }
+
+ n->parentlink = NULL;
+
+ // Removing a red node never changes black heights.
+ if (n->red)
+ return;
+
+ n = child;
+
+ // A red child absorbs the lost black height by turning black.
+ if (red(n)) {
+ n->red = false;
+ return;
+ }
+
+ // n (possibly NULL) is now "double black"; rebalance upward.
+ repeat:
+ if (n == top) {
+ assert(!red(n));
+ return;
+ }
+
+ Node *sib;
+
+ // n may be NULL (empty subtree); find the sibling via parent.
+ if (n)
+ sib = sibling(n);
+ else
+ sib = parent->left ? parent->left : parent->right;
+
+ if (sib->red) {
+ // Red sibling: rotate so n acquires a black sibling.
+ assert(!parent->red);
+ assert(!red(sib->left));
+ assert(!red(sib->right));
+
+ parent->red = true;
+ sib->red = false;
+
+ if (node_is_left(sib)) {
+ rotate_right(parent);
+ sib = parent->left;
+ } else {
+ rotate_left(parent);
+ sib = parent->right;
+ }
+
+ if (n)
+ assert(sib == sibling(n));
+ } else if (!parent->red && !red(sib->left) && !red(sib->right)) {
+ // Everything black: push the deficit up to the parent.
+ sib->red = true;
+ assert(n != parent);
+ n = parent;
+ parent = parent->parent;
+ goto repeat;
+ }
+
+ assert(!sib->red);
+
+ // NOTE(review): this branch looks unreachable -- the all-black
+ // case is handled by the else-if above, and the red-sibling case
+ // leaves parent red. Kept as a safety net; confirm and remove.
+ if (!parent->red && !red(sib->left) && !red(sib->right)) {
+ sib->red = true;
+ parent->red = false;
+ return;
+ }
+
+ // Red parent, black sibling with black children: recoloring
+ // alone restores the black height.
+ if (!red(sib->left) && !red(sib->right)) {
+ assert(parent->red);
+ sib->red = true;
+ parent->red = false;
+ return;
+ }
+
+ // Inner nephew red, outer black: rotate the sibling so the red
+ // nephew moves to the outer side.
+ if (node_is_left(sib) && !red(sib->left) && red(sib->right)) {
+ sib->red = true;
+ sib->right->red = false;
+ rotate_left(sib);
+ sib = sib->parent;
+ } else if (!node_is_left(sib) && !red(sib->right) && red(sib->left)) {
+ sib->red = true;
+ sib->left->red = false;
+ rotate_right(sib);
+ sib = sib->parent;
+ }
+
+ assert(parent == sib->parent);
+ assert(!sib->red);
+
+ // Final rotation: the sibling takes the parent's color, the
+ // parent and the outer nephew turn black, and the rotation
+ // rebalances the black heights.
+ sib->red = parent->red;
+ parent->red = false;
+
+ if (node_is_left(sib)) {
+ assert(sib->left->red);
+ sib->left->red = false;
+ rotate_right(parent);
+ } else {
+ assert(sib->right->red);
+ sib->right->red = false;
+ rotate_left(parent);
+ }
+ }
+}
+
+#endif
--- /dev/null
+#ifndef _UTIL_SPINLOCK_H
+#define _UTIL_SPINLOCK_H
+
+// There are two basic lock types: Lock and SpinLock. A thread that
+// blocks on a Lock may sleep until it obtains the lock. In kernel code,
+// a thread that blocks on a SpinLock will not sleep, but will run a busy
+// loop until the lock is freed. In userspace code, a SpinLock behaves
+// as an ordinary Lock.
+
+#ifdef _KERNEL
+#include <kern/spinlock.h>
+#endif
+
+namespace Lock {
+#ifndef _KERNEL
+#error FIXME -- implement locks for userspace
+ struct Lock {
+ };
+
+ typedef Lock SpinLock;
+#endif
+
+ // RAII guard: busy-wait acquire the SpinLock on construction,
+ // release it on scope exit.
+ struct AutoSpinLock {
+ SpinLock &real_lock;
+
+ AutoSpinLock(SpinLock &lock) : real_lock(lock)
+ {
+ real_lock.lock();
+ }
+
+ ~AutoSpinLock()
+ {
+ real_lock.unlock();
+ }
+ };
+
+	// Scoped SpinLock guard that may be released (and re-acquired)
+	// before the end of the scope; the destructor unlocks only if
+	// the lock is currently held.
+	struct DroppableAutoSpinLock {
+		SpinLock &real_lock;
+		bool dropped;
+
+		DroppableAutoSpinLock(SpinLock &lock) : real_lock(lock)
+		{
+			real_lock.lock();
+			dropped = false;
+		}
+
+		~DroppableAutoSpinLock()
+		{
+			if (dropped)
+				return;
+
+			real_lock.unlock();
+		}
+
+		// Re-acquire after an explicit unlock().
+		void lock()
+		{
+			assert(dropped);
+			real_lock.lock();
+			dropped = false;
+		}
+
+		// Release early; the destructor then becomes a no-op.
+		void unlock()
+		{
+			assert(!dropped);
+			dropped = true;
+			real_lock.unlock();
+		}
+	};
+
+ // As AutoSpinLock, but acquires with lock_irq()/unlock_irq().
+ // NOTE(review): unlock_irq() presumably re-enables IRQs
+ // unconditionally; use the RecIRQ variants where IRQs may already
+ // be disabled at acquisition -- confirm against kern/spinlock.h.
+ struct AutoSpinLockIRQ {
+ SpinLock &real_lock;
+
+ AutoSpinLockIRQ(SpinLock &lock) : real_lock(lock)
+ {
+ real_lock.lock_irq();
+ }
+
+ ~AutoSpinLockIRQ()
+ {
+ real_lock.unlock_irq();
+ }
+ };
+
+	// Droppable variant of AutoSpinLockIRQ: the guard can release the
+	// lock early and re-take it; the destructor only unlocks when the
+	// lock is still held.
+	struct DroppableAutoSpinLockIRQ {
+		SpinLock &real_lock;
+		bool dropped;
+
+		DroppableAutoSpinLockIRQ(SpinLock &lock) : real_lock(lock)
+		{
+			real_lock.lock_irq();
+			dropped = false;
+		}
+
+		~DroppableAutoSpinLockIRQ()
+		{
+			if (dropped)
+				return;
+
+			real_lock.unlock_irq();
+		}
+
+		// Re-acquire after a drop.
+		void lock()
+		{
+			assert(dropped);
+			real_lock.lock_irq();
+			dropped = false;
+		}
+
+		// Drop the lock early; the destructor becomes a no-op.
+		void unlock()
+		{
+			assert(!dropped);
+			dropped = true;
+			real_lock.unlock_irq();
+		}
+	};
+
+ // IRQ-state-preserving guard: lock_recirq() returns the previous
+ // IRQ state, which is restored on release, so this nests safely
+ // where IRQs may already be disabled.
+ struct AutoSpinLockRecIRQ {
+ SpinLock &real_lock;
+ ulong savedirq;
+
+ AutoSpinLockRecIRQ(SpinLock &lock) : real_lock(lock)
+ {
+ savedirq = real_lock.lock_recirq();
+ }
+
+ ~AutoSpinLockRecIRQ()
+ {
+ real_lock.unlock_recirq(savedirq);
+ }
+ };
+
+	// Droppable guard for lock_recirq()/unlock_recirq(): saves the
+	// IRQ state returned at acquisition and restores it on release;
+	// may be dropped and re-taken before the scope ends.
+	struct DroppableAutoSpinLockRecIRQ {
+		SpinLock &real_lock;
+		ulong savedirq;
+		bool dropped;
+
+		DroppableAutoSpinLockRecIRQ(SpinLock &lock) : real_lock(lock)
+		{
+			savedirq = real_lock.lock_recirq();
+			dropped = false;
+		}
+
+		~DroppableAutoSpinLockRecIRQ()
+		{
+			if (dropped)
+				return;
+
+			real_lock.unlock_recirq(savedirq);
+		}
+
+		// Re-acquire after a drop, capturing fresh IRQ state.
+		void lock()
+		{
+			assert(dropped);
+			savedirq = real_lock.lock_recirq();
+			dropped = false;
+		}
+
+		// Drop early, restoring the saved IRQ state.
+		void unlock()
+		{
+			assert(!dropped);
+			dropped = true;
+			real_lock.unlock_recirq(savedirq);
+		}
+	};
+}
+
+#endif
--- /dev/null
+include ../../../../Makefile.target
+
+# Pick up the util/ headers and the C library headers.
+CXXFLAGS += -I../.. -I../../../c $(DEFS) -Wall -Werror
+
+# Build both unit-test binaries by default. "all" is a command, not a
+# file it produces, so mark it phony.
+.PHONY: all
+all: heap rbtree
+
--- /dev/null
+#include <util/heap.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <time.h>
+
+using Util::Heap;
+
+// Heap test element: "data" is the sort key, "magic" detects the heap
+// handing back garbage memory.
+struct Data {
+	unsigned int magic;
+	unsigned int data;
+	Heap<Data>::Node heap_node;
+
+	// Heap ordering by key. Made const-correct so it can also be
+	// invoked through const references; non-const callers still work.
+	bool operator < (const Data &d) const
+	{
+		return data < d.data;
+	}
+
+	Data()
+	{
+		magic = 0xdeadbeef;
+	}
+};
+
+static const int heap_size = 10000;
+
+Data data[heap_size];
+
+// Heap unit test. Keys are kept below 0x80000000; once an element is
+// extracted its key is set to 0x80000000 so a duplicate return (or
+// garbage) is detectable. The seed is fixed for reproducibility.
+int main(void)
+{
+ Heap<Data> heap;
+ srandom(1);
+
+ // Phase 1: fill the heap with random keys.
+ for (int i = 0; i < heap_size; i++) {
+ Data *d = &data[i];
+ d->data = random() & 0x7fffffff;
+ heap.add(d);
+ }
+
+ unsigned int last = 0;
+
+ // Phase 2: drain, checking that keys come out in nondecreasing
+ // order and that each element is sane and returned only once.
+ for (int i = 0; i < heap_size; i++) {
+ Data *d = heap.get_top();
+ if (d == NULL) {
+ printf("FAILED: returned NULL early\n");
+ exit(1);
+ }
+
+ if (d->magic != 0xdeadbeef) {
+ printf("FAILED: garbage data returned\n");
+ exit(1);
+ }
+
+ if (d->data >= 0x80000000) {
+ printf("FAILED: duplicate or garbage data returned\n");
+ exit(1);
+ }
+
+ heap.del(d);
+
+ if (d->data < last) {
+ printf("FAILED: not monotonically increasing\n");
+ exit(1);
+ }
+
+ last = d->data;
+ d->data = 0x80000000;
+ }
+
+ // Phase 3: refill...
+ for (int i = 0; i < heap_size; i++) {
+ Data *d = &data[i];
+ d->data = random() & 0x7fffffff;
+ heap.add(d);
+ }
+
+ last = 0;
+
+ // ...and drain again, but after each extraction rekey up to 10
+ // still-queued elements and requeue() them, lowering the "last"
+ // bound when a new key falls below it.
+ for (int i = 0; i < heap_size; i++) {
+ Data *d = heap.get_top();
+ if (d == NULL) {
+ printf("FAILED: with requeue: returned NULL early\n");
+ exit(1);
+ }
+
+ if (d->magic != 0xdeadbeef) {
+ printf("FAILED: with requeue: garbage data returned\n");
+ exit(1);
+ }
+
+ if (d->data >= 0x80000000) {
+ printf("FAILED: with requeue: duplicate or garbage data returned\n");
+ exit(1);
+ }
+
+ heap.del(d);
+
+ if (d->data < last) {
+ printf("FAILED: with requeue: not monotonically increasing\n");
+ exit(1);
+ }
+
+ last = d->data;
+ d->data = 0x80000000;
+
+ for (int j = 0; j < 10; j++) {
+ d = &data[random() % heap_size];
+
+ // Skip elements already extracted (sentinel key).
+ if (d->data == 0x80000000)
+ continue;
+
+ d->data = random() & 0x7fffffff;
+ heap.requeue(d);
+
+ if (d->data < last)
+ last = d->data;
+ }
+ }
+
+ printf("PASSED\n");
+}
--- /dev/null
+#include <util/rbtree.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <time.h>
+
+using Util::RBTree;
+
+struct Data;
+//struct Node;
+typedef RBTree<Data, unsigned int, unsigned int> RBType;
+
+#if 0
+struct Node {
+ unsigned int data;
+
+ bool operator < (Node &d)
+ {
+ return data < d.data;
+ }
+
+ bool operator < (int d)
+ {
+ return data < d;
+ }
+
+ bool operator > (int d)
+ {
+ return data > d;
+ }
+};
+
+#endif
+// Tree test element; magic is intended to guard against garbage
+// returns. NOTE(review): magic is set but never checked in this
+// test's main() -- consider asserting on it after find().
+struct Data {
+ unsigned int magic;
+ RBType::Node rbtree_node;
+
+ Data()
+ {
+ magic = 0xdeadbeef;
+ }
+};
+
+static const unsigned int rbtree_size = 1000000;
+
+Data data[rbtree_size];
+
+// RBTree unit test: insert rbtree_size unique random keys, then look
+// each one up and delete it.
+// NOTE(review): the seed is time-based, so failures are not
+// reproducible -- consider a fixed seed like the heap test uses.
+int main(void)
+{
+ RBType rbtree;
+ srandom(time(NULL));
+
+ for (unsigned int i = 0; i < rbtree_size; i++) {
+ Data *d = &data[i];
+ unsigned int val;
+
+ // Re-roll until the key is not already in the tree; the tree
+ // asserts on duplicate insertions.
+ do {
+ val = random() & 0x7fffffff;
+ } while (rbtree.find(val));
+
+ d->rbtree_node.value = val;
+ rbtree.add(d);
+ }
+
+ // Every inserted key must be findable, map back to its element,
+ // and be deletable.
+ for (unsigned int i = 0; i < rbtree_size; i++) {
+ unsigned int val = data[i].rbtree_node.value;
+ Data *d = rbtree.find(val);
+ assert(d);
+ assert(d->rbtree_node.value == val);
+ rbtree.del(d);
+ }
+
+ printf("PASSED\n");
+}
--- /dev/null
+This is a header library that provides generic low-level facilities,
+mainly abstractions of architecture-specific operations that C/C++ do
+not abstract. It is intended to be usable by the kernel, user apps,
+and standalone code such as bootloaders, with either C or C++.
+
+It contains both privileged and nonprivileged operations (and some,
+such as reading a clock, whose privileged status and/or availability
+in a standalone environment depends on the architecture or other
+factors); it is up to the user of the library to know whether it is
+privileged enough to use any given function.
--- /dev/null
+// Byte-swapping, unaligned memory, and I/O port access functions. These
+// functions are all unprivileged, except the ll_in_* and ll_out_*
+// functions, whose privilege is architecture-dependent (on PPC and most
+// other non-x86 architectures, they require the address specified to be
+// mapped appropriately).
+
+#ifndef _LL_ARCH_IO_H
+#define _LL_ARCH_IO_H
+
+#include <lowlevel/types.h>
+
+// Unconditional byte-swap
+
+// Swap the two bytes of a 16-bit value. The previous expression had
+// unbalanced parentheses (a syntax error) and reduced to the
+// identity; this is the intended swap.
+static inline uint16_t ll_swap16(uint16_t val)
+{
+	return (uint16_t)((val >> 8) | (val << 8));
+}
+
+// Reverse the four bytes of a 32-bit value.
+static inline uint32_t ll_swap32(uint32_t val)
+{
+	return ((val & 0x000000ff) << 24) |
+	       ((val & 0x0000ff00) << 8) |
+	       ((val & 0x00ff0000) >> 8) |
+	       ((val & 0xff000000) >> 24);
+}
+
+// Reverse the eight bytes of a 64-bit value. The swapped low input
+// half must become the HIGH half of the result (and vice versa); the
+// previous version left the swapped halves in their old positions.
+static inline uint64_t ll_swap64(uint64_t val)
+{
+	return ((uint64_t)ll_swap32((uint32_t)val) << 32) |
+	       ll_swap32((uint32_t)(val >> 32));
+}
+
+// Conditional byte-swap to/from a specific endianness
+
+// This is the big-endian (ppc) header, so conversions to/from
+// big-endian are identity operations and conversions to/from
+// little-endian byte-swap.
+static inline uint64_t ll_swap_be64(uint64_t val)
+{
+ return val;
+}
+
+static inline uint32_t ll_swap_be32(uint32_t val)
+{
+ return val;
+}
+
+static inline uint16_t ll_swap_be16(uint16_t val)
+{
+ return val;
+}
+
+static inline uint64_t ll_swap_le64(uint64_t val)
+{
+ return ll_swap64(val);
+}
+
+static inline uint32_t ll_swap_le32(uint32_t val)
+{
+ return ll_swap32(val);
+}
+
+static inline uint16_t ll_swap_le16(uint16_t val)
+{
+ return ll_swap16(val);
+}
+
+// Unaligned, host-endian
+
+// Host-endian loads/stores at possibly-unaligned addresses, done by
+// direct dereference.
+// NOTE(review): this assumes the CPU/mapping tolerates unaligned
+// integer accesses -- confirm for the PPC targets this header
+// supports.
+static inline uint16_t ll_load_unaligned_16(void *addr)
+{
+ return *(uint16_t *)addr;
+}
+
+static inline uint64_t ll_load_unaligned_64(void *addr)
+{
+ // FIXME: atomic using FP?
+ return *(uint64_t *)addr;
+}
+
+static inline uint32_t ll_load_unaligned_32(void *addr)
+{
+ return *(uint32_t *)addr;
+}
+
+static inline void ll_store_unaligned_16(void *addr, uint16_t val)
+{
+ *(uint16_t *)addr = val;
+}
+
+static inline void ll_store_unaligned_64(void *addr, uint64_t val)
+{
+ // FIXME: atomic using FP?
+ *(uint64_t *)addr = val;
+}
+
+static inline void ll_store_unaligned_32(void *addr, uint32_t val)
+{
+ *(uint32_t *)addr = val;
+}
+
+// Unaligned, little-endian
+
+// Little-endian accessors built on the PPC byte-reversed load/store
+// instructions (lhbrx/lwbrx, sthbrx/stwbrx). The dummy "m" operand
+// makes the memory dependence visible to the compiler so it does not
+// reorder or drop the access.
+static inline uint16_t ll_load_unaligned_le16(void *addr)
+{
+ uint16_t val;
+ asm("lhbrx %0, 0, %1" : "=r" (val) : "r" (addr), "m" (*(uint16_t *)addr));
+ return val;
+}
+
+static inline uint32_t ll_load_unaligned_le32(void *addr)
+{
+ uint32_t val;
+ asm("lwbrx %0, 0, %1" : "=r" (val) : "r" (addr), "m" (*(uint32_t *)addr));
+ return val;
+}
+
+// 64-bit load assembled from two 32-bit byte-reversed loads; the low
+// 32 bits of a little-endian 64-bit value come first in memory.
+static inline uint64_t ll_load_unaligned_le64(void *addr)
+{
+ // FIXME: atomic using FP?
+ return (uint64_t)ll_load_unaligned_le32(addr) |
+ ((uint64_t)ll_load_unaligned_le32((void *)((char *)addr + 4)) << 32);
+}
+
+static inline void ll_store_unaligned_le16(void *addr, uint16_t val)
+{
+ asm("sthbrx %1, 0, %2" : "=m" (*(uint16_t *)addr) : "r" (val), "r" (addr));
+}
+
+static inline void ll_store_unaligned_le32(void *addr, uint32_t val)
+{
+ asm("stwbrx %1, 0, %2" : "=m" (*(uint32_t *)addr) : "r" (val), "r" (addr));
+}
+
+static inline void ll_store_unaligned_le64(void *addr, uint64_t val)
+{
+ // FIXME: atomic using FP?
+ ll_store_unaligned_le32(addr, (uint32_t)val);
+ ll_store_unaligned_le32((void *)((char *)addr + 4), (uint32_t)(val >> 32));
+}
+
+// Aligned, little-endian
+
+// Aligned little-endian accessors: delegate to the unaligned
+// versions, which are also correct for aligned addresses.
+static inline uint64_t ll_load_le64(uint64_t *addr)
+{
+ return ll_load_unaligned_le64(addr);
+}
+
+static inline uint32_t ll_load_le32(uint32_t *addr)
+{
+ return ll_load_unaligned_le32(addr);
+}
+
+static inline uint16_t ll_load_le16(uint16_t *addr)
+{
+ return ll_load_unaligned_le16(addr);
+}
+
+static inline void ll_store_le64(uint64_t *addr, uint64_t val)
+{
+ ll_store_unaligned_le64(addr, val);
+}
+
+static inline void ll_store_le32(uint32_t *addr, uint32_t val)
+{
+ ll_store_unaligned_le32(addr, val);
+}
+
+static inline void ll_store_le16(uint16_t *addr, uint16_t val)
+{
+ ll_store_unaligned_le16(addr, val);
+}
+
+// Unaligned, big-endian
+
+// Big-endian accessors: trivial on a big-endian host.
+static inline uint64_t ll_load_unaligned_be64(void *addr)
+{
+	return ll_load_unaligned_64(addr);
+}
+
+static inline uint32_t ll_load_unaligned_be32(void *addr)
+{
+	return ll_load_unaligned_32(addr);
+}
+
+static inline uint16_t ll_load_unaligned_be16(void *addr)
+{
+	return ll_load_unaligned_16(addr);
+}
+
+// Added: the 64-bit store was missing, leaving this API asymmetric
+// with the 64-bit load above and with the x64 version of this header.
+static inline void ll_store_unaligned_be64(void *addr, uint64_t val)
+{
+	ll_store_unaligned_64(addr, val);
+}
+
+static inline void ll_store_unaligned_be32(void *addr, uint32_t val)
+{
+	ll_store_unaligned_32(addr, val);
+}
+
+static inline void ll_store_unaligned_be16(void *addr, uint16_t val)
+{
+	ll_store_unaligned_16(addr, val);
+}
+
+// Aligned, big-endian
+
+// Aligned big-endian accessors; trivial on a big-endian host.
+// NOTE(review): no 64-bit aligned BE accessors are defined here,
+// unlike the LE set above -- confirm nothing needs them.
+static inline uint32_t ll_load_be32(uint32_t *addr)
+{
+ return ll_load_unaligned_be32(addr);
+}
+
+static inline uint16_t ll_load_be16(uint16_t *addr)
+{
+ return ll_load_unaligned_be16(addr);
+}
+
+static inline void ll_store_be32(uint32_t *addr, uint32_t val)
+{
+ ll_store_unaligned_be32(addr, val);
+}
+
+static inline void ll_store_be16(uint16_t *addr, uint16_t val)
+{
+ ll_store_unaligned_be16(addr, val);
+}
+
+// PCI/ISA/similar I/O-space; if memory-mapped, addr must include
+// the base of the I/O window.
+
+// 8-bit I/O-space read (memory-mapped on PPC).
+static inline uint8_t ll_in_8(ulong addr)
+{
+	return *(uint8_t *)addr;
+}
+
+// Wider I/O-space reads. The ll_load_* accessors take pointers, not
+// integers, so addr must be cast (the previous versions passed the
+// integer straight through, a constraint violation).
+// NOTE(review): these accesses are not volatile and include no
+// sync/eieio barriers -- confirm callers provide MMIO ordering.
+static inline uint16_t ll_in_be16(ulong addr)
+{
+	return ll_load_be16((uint16_t *)addr);
+}
+
+static inline uint32_t ll_in_be32(ulong addr)
+{
+	return ll_load_be32((uint32_t *)addr);
+}
+
+static inline uint16_t ll_in_le16(ulong addr)
+{
+	return ll_load_le16((uint16_t *)addr);
+}
+
+static inline uint32_t ll_in_le32(ulong addr)
+{
+	return ll_load_le32((uint32_t *)addr);
+}
+
+// I/O-space writes; as above, the integer address must be cast.
+static inline void ll_out_8(ulong addr, uint8_t val)
+{
+	*(uint8_t *)addr = val;
+}
+
+static inline void ll_out_be16(ulong addr, uint16_t val)
+{
+	ll_store_be16((uint16_t *)addr, val);
+}
+
+static inline void ll_out_be32(ulong addr, uint32_t val)
+{
+	ll_store_be32((uint32_t *)addr, val);
+}
+
+static inline void ll_out_le16(ulong addr, uint16_t val)
+{
+	ll_store_le16((uint16_t *)addr, val);
+}
+
+static inline void ll_out_le32(ulong addr, uint32_t val)
+{
+	ll_store_le32((uint32_t *)addr, val);
+}
+
+#endif
--- /dev/null
+#ifndef _LL_ARCH_ATOMIC_H
+#define _LL_ARCH_ATOMIC_H
+
+#include <lowlevel/arch-x86-common/atomic.h>
+
+// Atomically compare-and-swap a long: if *val == oldval, store newval.
+// Returns nonzero on success (the swap happened), zero otherwise.
+//
+// Fixes over the previous version: "ret" was never assigned (the setz
+// targeted the wrong operand, so an uninitialized value was returned),
+// the cmpxchg operand order was reversed (the destination must be the
+// memory operand), and %rax was modified by cmpxchg without being
+// declared as an output.
+static inline int ll_cmpxchg_long(unsigned long *val, unsigned long oldval,
+                                  unsigned long newval)
+{
+	unsigned char ret;
+
+	if (_LL_SMP)
+		asm volatile("lock; cmpxchgq %3, %0; setz %1" :
+		             "+m" (*val), "=q" (ret), "+a" (oldval) :
+		             "r" (newval) : "memory");
+	else
+		asm volatile("cmpxchgq %3, %0; setz %1" :
+		             "+m" (*val), "=q" (ret), "+a" (oldval) :
+		             "r" (newval) : "memory");
+
+	return ret;
+}
+
+// Atomically exchange *ptr with val, returning the previous value.
+// xchg with a memory operand is implicitly locked on x86; the added
+// "memory" clobber makes it a compiler barrier as well, so it can be
+// used as a synchronization primitive (the previous version lacked
+// it, allowing the compiler to reorder surrounding accesses).
+static inline unsigned long ll_xchg_long(unsigned long *ptr,
+                                         unsigned long val)
+{
+	asm volatile("xchgq %0, %1" : "+r" (val), "+m" (*ptr) : : "memory");
+	return val;
+}
+
+#endif
--- /dev/null
+#include <lowlevel/arch-x86-common/barriers-fence.h>
--- /dev/null
+// Bit manipulation functions. These functions are not privileged.
+
+#ifndef _LL_ARCH_BITOPS_H
+#define _LL_ARCH_BITOPS_H
+
+// Find First (least-significant) Set, counting from 0,
+// undefined if no bits set
+
+static inline int ll_ffs(unsigned long val)
+{
+ unsigned long ret;
+ // bsf scans from bit 0 upward; its result is undefined when
+ // val == 0, hence the contract above.
+ asm("bsfq %1, %0" : "=r" (ret) : "r" (val));
+ return ret;
+}
+
+// Find Last (most-significant) Set, counting from 0,
+// undefined if no bits set
+
+static inline int ll_fls(unsigned long val)
+{
+ unsigned long ret;
+ // bsr scans from the top bit downward; its result is undefined
+ // when val == 0, hence the contract above.
+ asm("bsrq %1, %0" : "=r" (ret) : "r" (val));
+ return ret;
+}
+
+// As above, but on 64-bit values, regardless of sizeof(long)
+static inline int ll_ffs64(uint64_t val)
+{
+ // On x64 "unsigned long" is 64 bits, so delegation is lossless.
+ return ll_ffs(val);
+}
+
+static inline int ll_fls64(uint64_t val)
+{
+ return ll_fls(val);
+}
+
+// Set/Clear the nth bit in a multiword bitmap. These functions
+// are endian and word-size dependent.
+
+// Set bit "bit" (counting across words) in the bitmap. bts with a
+// memory operand addresses bits beyond the first word, so "bit" may
+// exceed the word size. The bitmap word is read AND written, so the
+// constraint must be "+m"; the previous "=m" declared it write-only,
+// which let the compiler treat the old contents as dead.
+static inline void ll_multiword_set_bit(unsigned long *bitmap, int bit)
+{
+	asm("bts %1, %0" : "+m" (bitmap[0]) : "r" (bit) : "memory");
+}
+
+// Clear bit "bit" in the multiword bitmap; see ll_multiword_set_bit.
+static inline void ll_multiword_clear_bit(unsigned long *bitmap, int bit)
+{
+	asm("btr %1, %0" : "+m" (bitmap[0]) : "r" (bit) : "memory");
+}
+
+#endif
--- /dev/null
+#include <lowlevel/arch-x86-common/clock.h>
--- /dev/null
+// Byte-swapping, unaligned memory, and I/O port access functions. These
+// functions are all unprivileged, except the ll_in_* and ll_out_*
+// functions, whose privilege is architecture-dependent (on x64,
+// they require IOPL 3).
+
+#ifndef _LL_ARCH_IO_H
+#define _LL_ARCH_IO_H
+
+#include <lowlevel/types.h>
+
+// Unconditional byte-swap
+
+static inline uint16_t ll_swap16(uint16_t val)
+{
+ // xchgb swaps the low two bytes of the register; "+abcd" restricts
+ // the value to a/b/c/d, the only registers with a %h (high-byte)
+ // subregister.
+ asm("xchgb %b0, %h0" : "+abcd" (val));
+ return val;
+}
+
+static inline uint32_t ll_swap32(uint32_t val)
+{
+ // bswap reverses all four bytes in place.
+ asm("bswap %0" : "+r" (val));
+ return val;
+}
+
+static inline uint64_t ll_swap64(uint64_t val)
+{
+ // On x64, bswap on a 64-bit register reverses all eight bytes.
+ asm("bswap %0" : "+r" (val));
+ return val;
+}
+
+// Conditional byte-swap to/from a specific endianness
+
+// This is the little-endian (x64) header, so conversions to/from
+// little-endian are identity operations and conversions to/from
+// big-endian byte-swap.
+static inline uint64_t ll_swap_le64(uint64_t val)
+{
+ return val;
+}
+
+static inline uint32_t ll_swap_le32(uint32_t val)
+{
+ return val;
+}
+
+static inline uint16_t ll_swap_le16(uint16_t val)
+{
+ return val;
+}
+
+static inline uint64_t ll_swap_be64(uint64_t val)
+{
+ return ll_swap64(val);
+}
+
+static inline uint32_t ll_swap_be32(uint32_t val)
+{
+ return ll_swap32(val);
+}
+
+static inline uint16_t ll_swap_be16(uint16_t val)
+{
+ return ll_swap16(val);
+}
+
+// Unaligned, host-endian
+
+// Host-endian loads/stores at possibly-unaligned addresses; x86
+// handles unaligned integer accesses directly, so a plain
+// dereference suffices.
+static inline uint64_t ll_load_unaligned_64(void *addr)
+{
+ return *(uint64_t *)addr;
+}
+
+static inline uint32_t ll_load_unaligned_32(void *addr)
+{
+ return *(uint32_t *)addr;
+}
+
+static inline uint16_t ll_load_unaligned_16(void *addr)
+{
+ return *(uint16_t *)addr;
+}
+
+static inline void ll_store_unaligned_64(void *addr, uint64_t val)
+{
+ *(uint64_t *)addr = val;
+}
+
+static inline void ll_store_unaligned_32(void *addr, uint32_t val)
+{
+ *(uint32_t *)addr = val;
+}
+
+static inline void ll_store_unaligned_16(void *addr, uint16_t val)
+{
+ *(uint16_t *)addr = val;
+}
+
+// Unaligned, big-endian
+
+// Big-endian accessors on a little-endian host: load/store with a
+// byte-swap on the way through.
+static inline uint16_t ll_load_unaligned_be16(void *addr)
+{
+ return ll_swap16(ll_load_unaligned_16(addr));
+}
+
+static inline uint32_t ll_load_unaligned_be32(void *addr)
+{
+ return ll_swap32(ll_load_unaligned_32(addr));
+}
+
+static inline uint64_t ll_load_unaligned_be64(void *addr)
+{
+ return ll_swap64(ll_load_unaligned_64(addr));
+}
+
+static inline void ll_store_unaligned_be16(void *addr, uint16_t val)
+{
+ ll_store_unaligned_16(addr, ll_swap16(val));
+}
+
+static inline void ll_store_unaligned_be32(void *addr, uint32_t val)
+{
+ ll_store_unaligned_32(addr, ll_swap32(val));
+}
+
+static inline void ll_store_unaligned_be64(void *addr, uint64_t val)
+{
+ ll_store_unaligned_64(addr, ll_swap64(val));
+}
+
+// Aligned, big-endian
+
+// Aligned big-endian accessors: delegate to the unaligned versions,
+// which are also correct for aligned addresses on x86.
+static inline uint64_t ll_load_be64(uint64_t *addr)
+{
+ return ll_load_unaligned_be64(addr);
+}
+
+static inline uint32_t ll_load_be32(uint32_t *addr)
+{
+ return ll_load_unaligned_be32(addr);
+}
+
+static inline uint16_t ll_load_be16(uint16_t *addr)
+{
+ return ll_load_unaligned_be16(addr);
+}
+
+static inline void ll_store_be64(uint64_t *addr, uint64_t val)
+{
+ ll_store_unaligned_be64(addr, val);
+}
+
+static inline void ll_store_be32(uint32_t *addr, uint32_t val)
+{
+ ll_store_unaligned_be32(addr, val);
+}
+
+static inline void ll_store_be16(uint16_t *addr, uint16_t val)
+{
+ ll_store_unaligned_be16(addr, val);
+}
+
+// Unaligned, little-endian
+
+// Little-endian accessors: trivial on a little-endian host.
+static inline uint64_t ll_load_unaligned_le64(void *addr)
+{
+	return ll_load_unaligned_64(addr);
+}
+
+static inline uint32_t ll_load_unaligned_le32(void *addr)
+{
+	return ll_load_unaligned_32(addr);
+}
+
+static inline uint16_t ll_load_unaligned_le16(void *addr)
+{
+	return ll_load_unaligned_16(addr);
+}
+
+// Added: the 64-bit store was missing, leaving this API asymmetric
+// with the 64-bit load above and with the ppc version of this header.
+static inline void ll_store_unaligned_le64(void *addr, uint64_t val)
+{
+	ll_store_unaligned_64(addr, val);
+}
+
+static inline void ll_store_unaligned_le32(void *addr, uint32_t val)
+{
+	ll_store_unaligned_32(addr, val);
+}
+
+static inline void ll_store_unaligned_le16(void *addr, uint16_t val)
+{
+	ll_store_unaligned_16(addr, val);
+}
+
+// Aligned, little-endian
+
+// Aligned little-endian accessors; trivial on a little-endian host.
+// NOTE(review): no 64-bit aligned LE accessors are defined here,
+// unlike the ppc version of this header -- confirm nothing needs
+// them.
+static inline uint32_t ll_load_le32(uint32_t *addr)
+{
+ return ll_load_unaligned_le32(addr);
+}
+
+static inline uint16_t ll_load_le16(uint16_t *addr)
+{
+ return ll_load_unaligned_le16(addr);
+}
+
+static inline void ll_store_le32(uint32_t *addr, uint32_t val)
+{
+ ll_store_unaligned_le32(addr, val);
+}
+
+static inline void ll_store_le16(uint16_t *addr, uint16_t val)
+{
+ ll_store_unaligned_le16(addr, val);
+}
+
+// PCI/ISA/similar I/O-space; if memory-mapped, addr must include
+// the base of the I/O window.
+
+// x86 port I/O: the port number goes in %dx ("d"), data in
+// %al/%ax/%eax ("a"). "volatile" keeps the accesses from being
+// reordered or elided, and the "memory" clobber orders them against
+// surrounding memory accesses.
+static inline uint8_t ll_in_8(uintptr_t addr)
+{
+ uint8_t val;
+ asm volatile("inb %w1, %b0" : "=a" (val) : "d" (addr) : "memory");
+ return val;
+}
+
+static inline uint16_t ll_in_le16(uintptr_t addr)
+{
+ uint16_t val;
+ asm volatile("inw %w1, %w0" : "=a" (val) : "d" (addr) : "memory");
+ return val;
+}
+
+static inline uint32_t ll_in_le32(uintptr_t addr)
+{
+ uint32_t val;
+ asm volatile("inl %w1, %0" : "=a" (val) : "d" (addr) : "memory");
+ return val;
+}
+
+// Port data is little-endian on the wire; the be variants swap.
+static inline uint16_t ll_in_be16(uintptr_t addr)
+{
+ return ll_swap16(ll_in_le16(addr));
+}
+
+static inline uint32_t ll_in_be32(uintptr_t addr)
+{
+ return ll_swap32(ll_in_le32(addr));
+}
+
+static inline void ll_out_8(uintptr_t addr, uint8_t val)
+{
+ asm volatile("outb %b0, %w1" : : "a" (val), "d" (addr) : "memory");
+}
+
+static inline void ll_out_le16(uintptr_t addr, uint16_t val)
+{
+ asm volatile("outw %w0, %w1" : : "a" (val), "d" (addr) : "memory");
+}
+
+static inline void ll_out_le32(uintptr_t addr, uint32_t val)
+{
+ asm volatile("outl %0, %w1" : : "a" (val), "d" (addr) : "memory");
+}
+
+static inline void ll_out_be16(uintptr_t addr, uint16_t val)
+{
+ ll_out_le16(addr, ll_swap16(val));
+}
+
+static inline void ll_out_be32(uintptr_t addr, uint32_t val)
+{
+ ll_out_le32(addr, ll_swap32(val));
+}
+
+#endif
--- /dev/null
+#ifndef _LL_ARCH_MISC_H
+#define _LL_ARCH_MISC_H
+
+#include <lowlevel/arch-x86-common/misc.h>
+
+#endif
--- /dev/null
+// include/arch-x64/types.h
+
+#ifndef _LL_ARCH_TYPES_H
+#define _LL_ARCH_TYPES_H
+
+typedef unsigned char uint8_t;
+typedef unsigned short uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+typedef char int8_t;
+typedef short int16_t;
+typedef int int32_t;
+typedef long long int64_t;
+
+typedef unsigned long size_t;
+typedef long ssize_t;
+typedef long intptr_t;
+typedef unsigned long uintptr_t;
+typedef long ptrdiff_t;
+
+#define _LL_LONG_BYTES 8
+#define _LL_LONG_LOGBYTES 3
+
+#endif
--- /dev/null
+// Atomic operations. These functions are not privileged, but may not be
+// available on all architectures. By default, they operate on 32-bit
+// quantities; however, certain functions are also available in a "long"
+// version which uses "unsigned long"; these are provided when the
+// function is likely to be useful for storing pointers rather than just
+// counters. The long versions are defined arch-x86/atomic.h and
+// arch-x64/atomic.h, not here.
+
+#ifndef _LL_ARCH_X86C_ATOMIC_H
+#define _LL_ARCH_X86C_ATOMIC_H
+
+#include <lowlevel/types.h>
+
// Atomically increment *val.  The lock prefix is only emitted (and
// paid for) on SMP builds; a single aligned incl already executes
// atomically with respect to interrupts on a uniprocessor.
static inline void ll_atomic_inc(int32_t *val)
{
	if (_LL_SMP)
		asm("lock; incl %0" : "+m" (*val) : : "memory");
	else
		asm("incl %0" : "+m" (*val) : : "memory");
}

// Atomically decrement *val; see ll_atomic_inc for the SMP/UP split.
static inline void ll_atomic_dec(int32_t *val)
{
	if (_LL_SMP)
		asm("lock; decl %0" : "+m" (*val) : : "memory");
	else
		asm("decl %0" : "+m" (*val) : : "memory");
}
+
+// Increment/Decrement and return non-zero if the new count is zero.
+
+static inline int ll_atomic_inc_and_test(int32_t *val)
+{
+ uint8_t ret;
+
+ if (_LL_SMP)
+ asm("lock; incl %0; setz %b1" : "+m" (*val), "=r" (ret) : : "memory");
+ else
+ asm("incl %0; setz %b1" : "+m" (*val), "=r" (ret) : : "memory");
+
+ return ret;
+}
+
+static inline int ll_atomic_dec_and_test(int32_t *val)
+{
+ uint8_t ret;
+
+ if (_LL_SMP)
+ asm("lock; decl %0; setz %b1" : "+m" (*val), "=r" (ret) : : "memory");
+ else
+ asm("decl %0; setz %b1" : "+m" (*val), "=r" (ret) : : "memory");
+
+ return ret;
+}
+
+static inline int ll_test_and_set(uint32_t *val, int bit)
+{
+ uint8_t ret;
+
+ if (_LL_SMP)
+ asm("lock; btsl %2, %0; setc %b1" : "+m" (*val), "=r" (ret) :
+ "ri" (bit) : "memory");
+ else
+ asm("btsl %2, %0; setc %b1" : "+m" (*val), "=r" (ret) :
+ "ri" (bit) : "memory");
+
+ return ret;
+}
+
+static inline int ll_test_and_clear(uint32_t *val, int bit)
+{
+ uint8_t ret;
+
+ if (_LL_SMP)
+ asm("lock; btrl %2, %0; setc %b1" : "+m" (*val), "=r" (ret) :
+ "ri" (bit) : "memory");
+ else
+ asm("btrl %2, %0; setc %b1" : "+m" (*val), "=r" (ret) :
+ "ri" (bit) : "memory");
+
+ return ret;
+}
+
+static inline int ll_test_and_flip(uint32_t *val, int bit)
+{
+ uint8_t ret;
+
+ if (_LL_SMP)
+ asm("lock; btcl %2, %0; setc %b1" : "+m" (*val), "=r" (ret) :
+ "ri" (bit) : "memory");
+ else
+ asm("btcl %2, %0; setc %b1" : "+m" (*val), "=r" (ret) :
+ "ri" (bit) : "memory");
+
+ return ret;
+}
+
+// Store newval in the pointer if oldval was there before.
+// Returns non-zero if the store succeded.
+
+static inline int ll_cmpxchg(uint32_t *val, uint32_t oldval, uint32_t newval)
+{
+ uint8_t ret;
+
+ if (_LL_SMP)
+ asm("lock; cmpxchgl %2, %0; setz %b1" :
+ "+m" (*val), "=r" (ret) : "r" (newval), "a" (oldval) : "memory");
+ else
+ asm("cmpxchgl %2, %0; setz %b1" :
+ "+m" (*val), "=r" (ret) : "r" (newval), "a" (oldval) : "memory");
+
+ return ret;
+}
+
+static inline unsigned long ll_xchg(uint32_t *ptr, uint32_t val)
+{
+ asm("xchgl %0, %1" : "+r" (val), "+m" (*ptr));
+ return val;
+}
+
+#endif
--- /dev/null
+// Memory and I/O barriers. These are not privileged.
+
+#ifndef _LL_ARCH_X86C_BARRIERS_FENCE_H
+#define _LL_ARCH_X86C_BARRIERS_FENCE_H
+
+#include <lowlevel/types.h>
+
// Full fence: orders all loads and stores on both sides.
static inline void ll_membar()
{
	asm volatile("mfence" : : : "memory");
}

static inline void ll_membar_load_after_load()
{
	asm volatile("lfence" : : : "memory");
}

// x86 only has dedicated fences for load-load (lfence) and
// store-store (sfence); the remaining orderings fall back to the
// full mfence.

static inline void ll_membar_load_after_store()
{
	ll_membar();
}

static inline void ll_membar_load_after_any()
{
	ll_membar();
}

static inline void ll_membar_any_after_load()
{
	ll_membar();
}

static inline void ll_membar_store_after_load()
{
	ll_membar();
}

static inline void ll_membar_store_after_store()
{
	asm volatile("sfence" : : : "memory");
}

static inline void ll_membar_store_after_any()
{
	ll_membar();
}

static inline void ll_membar_any_after_store()
{
	ll_membar();
}

// The ll_smp_* variants degrade to a pure compiler barrier on
// uniprocessor builds (_LL_SMP == 0), where hardware ordering
// against other CPUs is irrelevant.

static inline void ll_smp_membar()
{
	if (_LL_SMP)
		ll_membar();
	else
		ll_barrier();
}

static inline void ll_smp_membar_load_after_load()
{
	if (_LL_SMP)
		ll_membar_load_after_load();
	else
		ll_barrier();
}

static inline void ll_smp_membar_load_after_store()
{
	ll_smp_membar();
}

static inline void ll_smp_membar_load_after_any()
{
	ll_smp_membar();
}

static inline void ll_smp_membar_any_after_load()
{
	ll_smp_membar();
}
+
// Store-after-load ordering for SMP.  Like every other ll_smp_*
// barrier in this file, this only needs a hardware fence on SMP
// builds; the original called ll_membar() unconditionally, paying
// for an mfence on uniprocessor configurations.
static inline void ll_smp_membar_store_after_load()
{
	ll_smp_membar();
}
+
// Store-store ordering: sfence on SMP, compiler barrier on UP.
static inline void ll_smp_membar_store_after_store()
{
	if (_LL_SMP)
		ll_membar_store_after_store();
	else
		ll_barrier();
}

static inline void ll_smp_membar_store_after_any()
{
	ll_smp_membar();
}

static inline void ll_smp_membar_any_after_store()
{
	ll_smp_membar();
}

#endif
--- /dev/null
+#ifndef _LL_ARCH_X86C_CLOCK_H
+#define _LL_ARCH_X86C_CLOCK_H
+
+#include <stdint.h>
+
+// Return a monotonically increasing 64-bit clock value that wraps
+// around no more often than once per year, and with a resolution of 1
+// us or better (if possible). This function's availability and
+// privileged status is architecture and configuration dependent; on
+// x86 and x64, it is normally not privileged. On some architectures,
+// and on older (486 and earlier) x86 chips, it will need to be
+// supplied externally using board or kernel specific code.
+
// Reads the CPU timestamp counter; rdtsc returns the 64-bit TSC in
// edx:eax.
static inline int64_t ll_getclock()
{
	int32_t high;
	uint32_t low;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));

	return ((int64_t)high << 32) | low;
}

// Low 32 bits of the TSC, for callers that don't need full width.
static inline int32_t ll_getclock32()
{
	uint32_t clock;
	// edx is still written by rdtsc even though only eax is used.
	asm volatile("rdtsc" : "=a" (clock) : : "edx");
	return clock;
}
+
+#endif
--- /dev/null
+#ifndef _LL_ARCH_X86C_MISC_H
+#define _LL_ARCH_X86C_MISC_H
+
+#include <lowlevel/types.h>
+
+// Call this inside busy loops to tell the CPU to save power,
+// use the bus less, or other CPU-specific effects.
+// This function is not privileged.
+
static inline void ll_busywait()
{
	// "pause" hints to the CPU that this is a spin-wait loop.
	asm volatile("pause" : : : "memory");
}

// The ll_ints functions are normally privileged, but on x86 can
// be also be used with IOPL 3.

static inline void ll_ints_off()
{
	asm volatile("cli" : : : "memory");
}

static inline void ll_ints_on()
{
	asm volatile("sti" : : : "memory");
}

// Disable interrupts, returning the prior EFLAGS image for
// ll_ints_restore.
static inline unsigned long ll_ints_save_and_off()
{
	unsigned long ret;
	asm volatile("pushf; pop %0; cli" : "=r" (ret) : : "memory");
	return ret;
}

// Restore the full EFLAGS image saved by ll_ints_save_and_off.
static inline void ll_ints_restore(unsigned long saved)
{
	asm volatile("push %0; popf" : : "r" (saved) : "memory");
}

// Returns true if ints on, false if off. This function is not privileged.

static inline int ll_get_int_state()
{
	unsigned long flags;
	asm volatile("pushf; pop %0" : "=r" (flags));

	// 0x200 is the IF (interrupt enable) bit of EFLAGS.
	return flags & 0x200;
}

// Invalidates the TLB entry for one page. This function is privileged,
// and requires at least a 486.

static inline void ll_invalidate_tlb_entry(unsigned long addr)
{
	asm volatile("invlpg %0" : "+m" (*(char *)addr) : : "memory");
}

#endif
--- /dev/null
+#ifndef _LL_ARCH_ATOMIC_H
+#define _LL_ARCH_ATOMIC_H
+
+#include <lowlevel/arch-x86-common/atomic.h>
+
// "long" variants for pointer-sized operands.  On 32-bit x86,
// unsigned long and uint32_t have the same size/representation, so
// these simply cast through to the 32-bit primitives.
static inline int ll_cmpxchg_long(unsigned long *val, unsigned long oldval,
                                  unsigned long newval)
{
	return ll_cmpxchg((uint32_t *)val, oldval, newval);
}

static inline unsigned long ll_xchg_long(unsigned long *ptr,
                                         unsigned long val)
{
	return ll_xchg((uint32_t *)ptr, val);
}
+
+#endif
--- /dev/null
+#include <lowlevel/arch-x86-common/barriers-fence.h>
--- /dev/null
+// Bit manipulation functions. These functions are not privileged.
+
+#ifndef _LL_ARCH_BITOPS_H
+#define _LL_ARCH_BITOPS_H
+
// Find First Set, counting from 0, undefined if no bits set
// (bsf leaves the destination undefined when the source is zero).
static inline int ll_ffs(unsigned long val)
{
	unsigned long ret;
	asm("bsfl %1, %0" : "=r" (ret) : "r" (val));
	return ret;
}

// Find Last Set, counting from 0, undefined if no bits set
// NOTE(review): the "l" suffix assumes 32-bit unsigned long — this
// header is for 32-bit x86 only.
static inline int ll_fls(unsigned long val)
{
	unsigned long ret;
	asm("bsrl %1, %0" : "=r" (ret) : "r" (val));
	return ret;
}
+
+// As above, but on 64-bit values, regardless of sizeof(long)
+static inline int ll_ffs64(uint64_t val)
+{
+ if ((uint32_t)val)
+ return ll_ffs((uint32_t)val);
+ else
+ return ll_ffs((uint32_t)(val >> 32));
+}
+
+static inline int ll_fls64(uint64_t val)
+{
+ if ((uint32_t)(val >> 32))
+ return ll_ffs((uint32_t)(val >> 32));
+ else
+ return ll_ffs((uint32_t)val);
+}
+
+// Set/Clear the nth bit in a multiword bitmap. These functions
+// are endian and word-size dependent.
+
+static inline void ll_multiword_set_bit(unsigned long *bitmap, int bit)
+{
+ asm("bts %1, %0" : "=m" (bitmap[0]) : "r" (bit) : "memory");
+}
+
+static inline void ll_multiword_clear_bit(unsigned long *bitmap, int bit)
+{
+ asm("btr %1, %0" : "=m" (bitmap[0]) : "r" (bit) : "memory");
+}
+
+#endif
--- /dev/null
+#include <lowlevel/arch-x86-common/clock.h>
--- /dev/null
+// Byte-swapping, unaligned memory, and I/O port access functions. These
+// functions are all unprivileged, except the ll_in_* and ll_out_*
+// functions, whose privilege is architecture-dependent (on x86,
+// they require IOPL 3).
+
+#ifndef _LL_ARCH_IO_H
+#define _LL_ARCH_IO_H
+
+#include <lowlevel/types.h>
+
+// Unconditional byte-swap
+
// Byte-swap a 16-bit value by exchanging the low byte (%b0) with the
// second byte (%h0); the "abcd" constraint restricts val to eax/ebx/
// ecx/edx, the only registers with both byte halves addressable.
static inline uint16_t ll_swap16(uint16_t val)
{
	asm("xchgb %b0, %h0" : "+abcd" (val));
	return val;
}
+
+static inline uint32_t ll_swap32(uint32_t val)
+{
+ asm("bswap %0" : "+r" (val));
+ return val;
+}
+
+static inline uint64_t ll_swap64(uint64_t val)
+{
+ return (uint64_t)ll_swap32(val) | ((uint64_t)ll_swap32(val >> 32) << 32);
+}
+
// Conditional byte-swap to/from a specific endianness
// The host is little-endian, so the LE conversions are identity and
// the BE conversions are unconditional swaps.

static inline uint64_t ll_swap_le64(uint64_t val)
{
	return val;
}

static inline uint32_t ll_swap_le32(uint32_t val)
{
	return val;
}

static inline uint16_t ll_swap_le16(uint16_t val)
{
	return val;
}

static inline uint64_t ll_swap_be64(uint64_t val)
{
	return ll_swap64(val);
}

static inline uint32_t ll_swap_be32(uint32_t val)
{
	return ll_swap32(val);
}

static inline uint16_t ll_swap_be16(uint16_t val)
{
	return ll_swap16(val);
}
+
// Unaligned, host-endian
// x86 handles unaligned accesses in hardware, so these are plain
// (possibly multi-cycle, non-atomic) loads and stores.

static inline uint64_t ll_load_unaligned_64(void *addr)
{
	// FIXME: atomic using FP/MMX?
	return *(uint64_t *)addr;
}

static inline uint32_t ll_load_unaligned_32(void *addr)
{
	return *(uint32_t *)addr;
}

static inline uint16_t ll_load_unaligned_16(void *addr)
{
	return *(uint16_t *)addr;
}

static inline void ll_store_unaligned_64(void *addr, uint64_t val)
{
	// FIXME: atomic using FP/MMX?
	*(uint64_t *)addr = val;
}

static inline void ll_store_unaligned_32(void *addr, uint32_t val)
{
	*(uint32_t *)addr = val;
}

static inline void ll_store_unaligned_16(void *addr, uint16_t val)
{
	*(uint16_t *)addr = val;
}

// Unaligned, big-endian

static inline uint16_t ll_load_unaligned_be16(void *addr)
{
	return ll_swap16(ll_load_unaligned_16(addr));
}

static inline uint32_t ll_load_unaligned_be32(void *addr)
{
	return ll_swap32(ll_load_unaligned_32(addr));
}

// 64-bit BE load as two 32-bit BE loads: in big-endian storage the
// word at addr is the high half and the word at addr+4 the low half.
static inline uint64_t ll_load_unaligned_be64(void *addr)
{
	// FIXME: atomic using FP/MMX?
	return ((uint64_t)ll_load_unaligned_be32(addr) << 32) |
	       (uint64_t)ll_load_unaligned_be32((void *)((char *)addr + 4));
}

static inline void ll_store_unaligned_be16(void *addr, uint16_t val)
{
	ll_store_unaligned_16(addr, ll_swap16(val));
}

static inline void ll_store_unaligned_be32(void *addr, uint32_t val)
{
	ll_store_unaligned_32(addr, ll_swap32(val));
}

static inline void ll_store_unaligned_be64(void *addr, uint64_t val)
{
	// FIXME: atomic using FP/MMX?
	ll_store_unaligned_be32(addr, (uint32_t)(val >> 32));
	ll_store_unaligned_be32((void *)((char *)addr + 4), (uint32_t)val);
}

// Aligned, big-endian
// Aligned accessors defer to the unaligned ones; the typed pointers
// only document the caller's alignment guarantee.

static inline uint64_t ll_load_be64(uint64_t *addr)
{
	return ll_load_unaligned_be64(addr);
}

static inline uint32_t ll_load_be32(uint32_t *addr)
{
	return ll_load_unaligned_be32(addr);
}

static inline uint16_t ll_load_be16(uint16_t *addr)
{
	return ll_load_unaligned_be16(addr);
}

static inline void ll_store_be64(uint64_t *addr, uint64_t val)
{
	ll_store_unaligned_be64(addr, val);
}

static inline void ll_store_be32(uint32_t *addr, uint32_t val)
{
	ll_store_unaligned_be32(addr, val);
}

static inline void ll_store_be16(uint16_t *addr, uint16_t val)
{
	ll_store_unaligned_be16(addr, val);
}

// Unaligned, little-endian
// The host is little-endian, so these are plain loads/stores.

static inline uint64_t ll_load_unaligned_le64(void *addr)
{
	return ll_load_unaligned_64(addr);
}

static inline uint32_t ll_load_unaligned_le32(void *addr)
{
	return ll_load_unaligned_32(addr);
}

static inline uint16_t ll_load_unaligned_le16(void *addr)
{
	return ll_load_unaligned_16(addr);
}

static inline void ll_store_unaligned_le32(void *addr, uint32_t val)
{
	ll_store_unaligned_32(addr, val);
}

static inline void ll_store_unaligned_le16(void *addr, uint16_t val)
{
	ll_store_unaligned_16(addr, val);
}

// Aligned, little-endian

static inline uint32_t ll_load_le32(uint32_t *addr)
{
	return ll_load_unaligned_le32(addr);
}

static inline uint16_t ll_load_le16(uint16_t *addr)
{
	return ll_load_unaligned_le16(addr);
}

static inline void ll_store_le32(uint32_t *addr, uint32_t val)
{
	ll_store_unaligned_le32(addr, val);
}

static inline void ll_store_le16(uint16_t *addr, uint16_t val)
{
	ll_store_unaligned_le16(addr, val);
}
+
// PCI/ISA/similar I/O-space; if memory-mapped, addr must include
// the base of the I/O window.
// "d" pins the port number to DX; "a" pins the data to AL/AX/EAX,
// matching the fixed-register in/out encodings.

static inline uint8_t ll_in_8(uintptr_t addr)
{
	uint8_t val;
	asm volatile("inb %w1, %b0" : "=a" (val) : "d" (addr) : "memory");
	return val;
}

static inline uint16_t ll_in_le16(uintptr_t addr)
{
	uint16_t val;
	asm volatile("inw %w1, %w0" : "=a" (val) : "d" (addr) : "memory");
	return val;
}

static inline uint32_t ll_in_le32(uintptr_t addr)
{
	uint32_t val;
	asm volatile("inl %w1, %0" : "=a" (val) : "d" (addr) : "memory");
	return val;
}

static inline uint16_t ll_in_be16(uintptr_t addr)
{
	return ll_swap16(ll_in_le16(addr));
}

static inline uint32_t ll_in_be32(uintptr_t addr)
{
	return ll_swap32(ll_in_le32(addr));
}

static inline void ll_out_8(uintptr_t addr, uint8_t val)
{
	asm volatile("outb %b0, %w1" : : "a" (val), "d" (addr) : "memory");
}

static inline void ll_out_le16(uintptr_t addr, uint16_t val)
{
	asm volatile("outw %w0, %w1" : : "a" (val), "d" (addr) : "memory");
}

static inline void ll_out_le32(uintptr_t addr, uint32_t val)
{
	asm volatile("outl %0, %w1" : : "a" (val), "d" (addr) : "memory");
}

static inline void ll_out_be16(uintptr_t addr, uint16_t val)
{
	ll_out_le16(addr, ll_swap16(val));
}

static inline void ll_out_be32(uintptr_t addr, uint32_t val)
{
	ll_out_le32(addr, ll_swap32(val));
}

#endif
--- /dev/null
+#ifndef _LL_ARCH_MISC_H
+#define _LL_ARCH_MISC_H
+
+#include <lowlevel/arch-x86-common/misc.h>
+
+#endif
--- /dev/null
+// include/arch-x86/types.h
+
+#ifndef _LL_ARCH_TYPES_H
+#define _LL_ARCH_TYPES_H
+
+typedef unsigned char uint8_t;
+typedef unsigned short uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+typedef char int8_t;
+typedef short int16_t;
+typedef int int32_t;
+typedef long long int64_t;
+
+typedef unsigned long size_t;
+typedef long ssize_t;
+typedef long intptr_t;
+typedef unsigned long uintptr_t;
+typedef long ptrdiff_t;
+
+#define _LL_LONG_BYTES 4
+#define _LL_LONG_LOGBYTES 2
+
+#endif
--- /dev/null
#ifndef _LL_ARCH_H
#define _LL_ARCH_H

// Yuck. But it avoids symlinks, and ## doesn't seem to work in this context.

// Builds the include path <lowlevel/arch-$(_LL_ARCH)/INC> at
// preprocess time; _LL_ARCH is expected to be defined externally
// (e.g. on the compiler command line) as the arch directory suffix.
// Usage: #include _LL_INC(foo.h)
#define _LL_INC3() <lowlevel/arch-
#define _LL_INC2() _LL_INC3()_LL_ARCH
#define _LL_INC(INC) _LL_INC2()/INC>

// _LL_SMP selects lock prefixes and real hardware barriers; define
// _LL_NOSMP to build a cheaper uniprocessor variant.
#ifdef _LL_NOSMP
#define _LL_SMP 0
#else
#define _LL_SMP 1
#endif

#endif
--- /dev/null
+#ifndef _LL_ATOMIC_H
+#define _LL_ATOMIC_H
+
+#include <lowlevel/arch.h>
+#include _LL_INC(atomic.h)
+
+#endif
--- /dev/null
+#ifndef _LL_BARRIERS_H
+#define _LL_BARRIERS_H
+
+#include <lowlevel/arch.h>
+
// A simple compilation barrier. This is not privileged.
// Emits no instructions; the "memory" clobber just stops the compiler
// from caching or reordering memory accesses across this point.
static inline void ll_barrier()
{
	asm volatile("" : : : "memory");
}
+
+#include _LL_INC(barriers.h)
+
+#endif
--- /dev/null
+// Bit manipulation functions. These functions are not privileged.
+
+#ifndef _LL_BITOPS_H
+#define _LL_BITOPS_H
+
+#include <lowlevel/arch.h>
+#include <lowlevel/types.h>
+#include _LL_INC(bitops.h)
+
// Log2 of the smallest power of two >= val (val must be non-zero).
static inline int ll_get_order_round_up(unsigned long val)
{
	if (val == 1)
		return 0;

	// fls(val - 1) is the order of the largest power of two below
	// val; one more covers val itself.
	return ll_fls(val - 1) + 1;
}

// Log2 of the largest power of two <= val (val must be non-zero).
static inline int ll_get_order_round_down(unsigned long val)
{
	return ll_fls(val);
}
+
+// Note that the multiword bit scans are endian and word size dependent.
+// They return -1 if no suitable bit was found. Start and end are
+// in bits.
+
// Find the first set bit in [start, len) of a multiword bitmap, or -1.
static inline int ll_multiword_ffs(unsigned long *bitmap, int start, int len)
{
	static const int bits_per_long = sizeof(unsigned long) * 8;
	int off = start / bits_per_long;
	int shift_first = start % bits_per_long;

	// Unaligned head: scan the remainder of the word containing
	// "start".  The original read *bitmap here — always word 0 —
	// which is wrong whenever start >= bits_per_long.
	if (shift_first) {
		unsigned long shifted = bitmap[off] >> shift_first;

		if (shifted) {
			int ret = ll_ffs(shifted) + start;

			// Clamp to len, as the aligned tail below does.
			return ret < len ? ret : -1;
		}

		off++;
		start = off * bits_per_long;
	}

	// Skip whole zero words.
	while (off < len / bits_per_long) {
		if (bitmap[off])
			break;

		off++;
		start += bits_per_long;
	}

	// Final (possibly partial) word.
	if (start < len && bitmap[off]) {
		int ret = start + ll_ffs(bitmap[off]);

		if (ret < len)
			return ret;
	}

	return -1;
}
+
// Find the first CLEAR bit in [start, len) of a multiword bitmap, or -1.
static inline int ll_multiword_ffc(unsigned long *bitmap, int start, int len)
{
	static const int bits_per_long = sizeof(unsigned long) * 8;
	int off = start / bits_per_long;
	int shift_first = start % bits_per_long;

	// Unaligned head.  Two fixes versus the original: it read
	// *bitmap (always word 0) instead of the word containing
	// "start", and it inverted AFTER shifting, so the zeros shifted
	// in at the top became ones and were falsely reported as clear
	// bits belonging to the next word.  Inverting first and then
	// shifting makes the vacated high bits zero (i.e. "not clear").
	if (shift_first) {
		unsigned long inverted = ~bitmap[off] >> shift_first;

		if (inverted) {
			int ret = ll_ffs(inverted) + start;
			return ret < len ? ret : -1;
		}

		off++;
		start = off * bits_per_long;
	}

	// Skip whole all-ones words.
	while (off < len / bits_per_long) {
		if (~bitmap[off])
			break;

		off++;
		start += bits_per_long;
	}

	// Final (possibly partial) word.
	if (start < len && ~bitmap[off]) {
		int ret = start + ll_ffs(~bitmap[off]);

		if (ret < len)
			return ret;
	}

	return -1;
}
+
+#endif
--- /dev/null
+#ifndef _LL_CLOCK_H
+#define _LL_CLOCK_H
+
+#include <lowlevel/arch.h>
+#include _LL_INC(clock.h)
+
+#endif
--- /dev/null
+#ifndef _LL_IO_H
+#define _LL_IO_H
+
+#include <lowlevel/arch.h>
+#include _LL_INC(io.h)
+
+#endif
--- /dev/null
+#ifndef _LL_MISC_H
+#define _LL_MISC_H
+
+#include <lowlevel/arch.h>
+#include _LL_INC(misc.h)
+
+#endif
--- /dev/null
+#ifndef _LL_TYPES_H
+#define _LL_TYPES_H
+
+#include <lowlevel/arch.h>
+
+// Normally, stdint.h will end up referring to this file, so the
+// #ifndef will prevent duplicate typedefs. If using these headers
+// on another OS whose stdint.h does not use this file, define
+// _LL_NO_STDINT and make sure that the stdint definitions have
+// been defined before including any lowlevel-lib files.
+
+#ifndef _LL_NO_STDINT
+#include _LL_INC(types.h)
+#endif
+
+#endif
--- /dev/null
+#ifndef _C_STDINT_H
+#define _C_STDINT_H
+
+#include <lowlevel/types.h>
+
+#endif
--- /dev/null
#ifndef _C_STRING_H
#define _C_STRING_H

#include <stddef.h>

#ifdef __cplusplus
extern "C" {
#endif

// Minimal freestanding subset of <string.h> provided by the kernel's
// lib; implementations live elsewhere in the tree.
int memcmp(const void *b1, const void *b2, size_t len);
size_t strlen(const char *s);

#ifdef __cplusplus
}
#endif

#endif
--- /dev/null
# Resolved (symlink-free) path of the tree root, i.e. the parent of
# this directory.  $(realpath ..) matches the old
# `dirname $(pwd -P)` shell-out without forking a process.
TOP := $(realpath ..)
COMP := kernel
BUILDTYPE := kernel
include ../Makefile.head

CXXINCS += -I$(BUILDDIR)/include -Iinclude
DEFS += -D_KERNEL

ASFLAGS += $(CXXINCS)

# Must come first; contains entry code
include arch/$(ARCH)/Makefile

include core/Makefile
include mem/Makefile
include io/Makefile
include orb/Makefile
include lib/Makefile
include tests/Makefile

TARGETS := $(BUILDDIR)/kernel $(BUILDDIR)/kernel.stripped

# Point $(BUILDDIR)/include/arch at the per-arch header directory.
# $(CURDIR) replaces $(shell pwd) — same value, no subshell.
.PHONY: symlinks
symlinks:
	@echo kernel: Creating arch symlink
	@$(RM) $(BUILDDIR)/include/arch
	@$(LN) $(CURDIR)/include/arch-$(ARCH) $(BUILDDIR)/include/arch

PREDEP := symlinks

include ../Makefile.tail

# Must come last: contains kernel target
include arch/$(ARCH)/Makefile.final
--- /dev/null
DIR := arch/x64/
DIRS += $(DIR)

# Per-directory source lists, without path prefixes.
RAW_ASFILES := entry
RAW_CXXFILES := descriptors misc multiboot mem thread

# Register them with the global lists, prefixed with this directory.
ASFILES += $(addprefix $(DIR),$(RAW_ASFILES))
CXXFILES += $(addprefix $(DIR),$(RAW_CXXFILES))

include arch/x86-common/Makefile
--- /dev/null
DIR := arch/x64/
# Kernel-mode codegen: no red zone (interrupt frames would clobber
# it), kernel code model for the high link address, frame pointers
# kept for usable backtraces.
CXXFLAGS += -mno-red-zone -mcmodel=kernel -fno-omit-frame-pointer

# Link with the arch linker script; relinks when the script changes.
$(BUILDDIR)/kernel: $(OBJS) $(DIR)linker-script
	@echo $(COMP): Linking kernel: $@
	@$(MKDIR) $(dir $@)
	@$(LD) $(OBJS) -o "$@" -T$(DIR)linker-script

$(BUILDDIR)/kernel.stripped: $(BUILDDIR)/kernel
	@echo $(COMP): Stripping kernel: $@
	@$(STRIP) $(BUILDDIR)/kernel -o "$@"
--- /dev/null
+// arch/x64/descriptors.cc -- code to manage segment/trap/interrupt descriptors
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/types.h>
+#include <arch/addrs.h>
+#include <arch/thread.h>
+#include <arch/mem.h>
+
namespace Arch {
namespace Priv {
	// The (single) hardware task state segment; installed via the
	// 0x20 GDT entry below.
	TSS tss;
}
}

using Arch::Priv::Descriptor;
using Arch::Priv::TSS;
using Arch::Priv::tss;

// Global descriptor table.  Only the first six entries are
// initialized; the rest are zero (not present).  The layout must
// match the selectors hard-coded in entry.S and set_int_gate:
// 0x08 data, 0x10 32-bit code, 0x18 64-bit code, 0x20/0x28 TSS.
Descriptor x64_gdt[1024] = {
	{}, // The first entry is reserved for the NULL selector.
	{ // 0x08: data
		limit_low: 0xffff,
		base_low: 0,
		base_mid: 0,
		type: 2, // data segment, writable
		user: 1,
		dpl: 0,
		present: 1,
		limit_high: 0xff,
		sw: 0,
		code64: 0,
		opsize: 1,
		gran: 1,
		base_high: 0
	},
	{ // 0x10: 32-bit code
		limit_low: 0xffff,
		base_low: 0,
		base_mid: 0,
		type: 10, // code segment, readable
		user: 1,
		dpl: 0,
		present: 1,
		limit_high: 0xff,
		sw: 0,
		code64: 0,
		opsize: 1,
		gran: 1,
		base_high: 0
	},
	{ // 0x18: 64-bit code
		limit_low: 0xffff,
		base_low: 0,
		base_mid: 0,
		type: 10, // code segment, readable
		user: 1,
		dpl: 0,
		present: 1,
		limit_high: 0xff,
		sw: 0,
		code64: 1,
		opsize: 0,
		gran: 1,
		base_high: 0
	},
	{ // 0x20: TSS
		limit_low: sizeof(TSS) - 4, // NOTE(review): descriptor limits are normally size-1; confirm the -4.
		base_low: 0,
		base_mid: 0,
		type: 9, // available 64-bit TSS
		user: 0,
		dpl: 0,
		present: 1,
		limit_high: 0,
		sw: 0,
		code64: 0,
		opsize: 0,
		gran: 0,
		base_high: 0,
	},
	{ // 0x28: TSS high
	}
};

// Wrapper for the 10-byte GDTR/IDTR image: pad places limit/address
// at offset 6, so "lgdt/lidt 6(base)" sees a packed
// (u16 limit, u64 base) pseudo-descriptor with natural alignment of
// the structure itself.
struct X64DescriptorTablePointer {
	u8 pad[6];
	u16 limit;
	u64 address;
};
+
+X64DescriptorTablePointer x64_gdtr = {
+ // G++ still won't handle complex labelled initializers, so
+ // we have to explicitly initialize pad.
+
+ pad: { 0, 0, 0, 0, 0, 0 },
+ limit: sizeof(x64_gdt),
+ address: reinterpret_cast<u64>(&x64_gdt)
+};
+
+X64DescriptorTablePointer x64_gdtr_phys = {
+ pad: { 0, 0, 0, 0, 0, 0 },
+ limit: sizeof(x64_gdt),
+ address: reinterpret_cast<u64>(&x64_gdt) - KERNEL_START
+};
+
// 16-byte long-mode IDT gate descriptor.
struct X64InterruptDescriptor {
	u16 offset_low;
	u16 selector;
	u8 stack_index:3; // IST index; 0 = use the normal stack switch rules
	u8 reserved:5;
	u8 type:4;
	u8 user:1; // descriptor-type bit; 0 for system descriptors (gates)
	u8 dpl:2;
	u8 present:1;
	u16 offset_mid;
	u32 offset_high;
	u32 reserved2;
};
+
+static X64InterruptDescriptor idt[256];
+
+static X64DescriptorTablePointer idtr = {
+ pad: { 0, 0, 0, 0, 0, 0 },
+ limit: sizeof(idt),
+ address: reinterpret_cast<u64>(&idt)
+};
+
+// Set a gate for INT num to start executing at addr.
+//
+// If ints_off is set, then interrupts will remain disabled until
+// software enables them; otherwise, the interrupt flag will have the
+// same state as in the code that was interrupted.
+//
+// If user is true, then this gate can be called directly from
+// userspace with the INT instruction. Otherwise, usermode
+// INT to this gate will cause a GPF, but it may still be reached
+// from user code via hardware interrupt or exception.
+//
+// If stack_index is non-zero, use the specified alternate stack even if
+// interrupting kernel code.
+
static void set_int_gate(int num, void *addrptr, bool ints_off = false,
                         bool user = false, int stack_index = 0)
{
	u64 addr = (u64)addrptr;

	X64InterruptDescriptor desc = {
		offset_low: addr & 0xffff,
		selector: 0x18, // 64-bit kernel code segment (see x64_gdt)
		stack_index: stack_index,
		reserved: 0,
		type: ints_off ? 14 : 15, // 14 = interrupt gate (clears IF), 15 = trap gate
		user: 0,
		dpl: user ? 3 : 0, // DPL 3 allows usermode INT to this vector
		present: 1,
		offset_mid: (addr >> 16) & 0xffff,
		offset_high: addr >> 32
	};

	idt[num] = desc;
}

// Handler entry points; defined in entry.S.
extern int x64_diverr, x64_gpf, x64_page_fault, x64_invalid_insn;
extern void *x64_irqs[256];

namespace Arch {
namespace Priv {
	// Load the IDTR and install the exception and IRQ gates.
	void set_idt()
	{
		// GCC 4.0 pukes on "m" (&idtr.limit), saying it's not
		// directly addressable.

		// The +6 skips idtr.pad so the CPU reads the packed
		// (limit, base) image.
		asm volatile("lidtq 6(%0)" : : "r" (&idtr) : "memory");

		set_int_gate(0, &x64_diverr);
		set_int_gate(6, &x64_invalid_insn);
		set_int_gate(13, &x64_gpf);
		// Page faults keep interrupts off (ints_off = true) so CR2
		// can be read before another fault could clobber it.
		set_int_gate(14, &x64_page_fault, true);

		// Hardware IRQ vectors 0x20..0x2f.
		for (int i = 0x20; i < 0x30; i++)
			set_int_gate(i, x64_irqs[i]);
	}
}
}
--- /dev/null
+// arch/x86/entry.S - x64 entry points (booting and traps)
+//
+// This software is copyright (c) 2006 Scott Wood.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <arch/addrs.h>
+
	.org 0
	.code32
	.global _start
.align 4
multiboot_hdr:
	.long 0x1badb002 // Multi-boot magic

	// Multi-boot flags:
	// bit 0: 4KiB-align all boot modules
	// bit 1: must include memory size and map
	// bit 2: must include video mode table
	// bit 16: load addresses in this header are valid
	// and should be used instead of the ELF header

	.long 0x00010003

	// checksum: -(magic + flags), update if flags change
	// (0x1badb002 + 0x00010003 + 0xe4514ffb == 0 mod 2^32)
	.long 0xe4514ffb

	.long multiboot_hdr - KERNEL_START // header_addr
	.long 0x00200000 // load_addr
	.long 0 // load_end_addr: load whole file
	.long bss_end - KERNEL_START // bss_end_addr
	.long _start - KERNEL_START // entry_addr
+
_start:
	cld
	// A multiboot loader passes the magic 0x2badb002 in %eax.
	cmpl $0x2badb002, %eax
	// Fix: "bne" is not an x86 mnemonic (the original line would
	// not assemble); the x86 branch-if-not-equal is "jne".
	jne no_multiboot

	lgdt x64_gdtr_phys + 6 - KERNEL_START
	// Far jump through the 32-bit code selector (0x10) to load %cs
	// from our own GDT.
	ljmp $0x10, $using_our_gdt - KERNEL_START

using_our_gdt:
	// 0x08 is the flat data segment; use it for all data selectors.
	movw $0x08, %ax
	movw %ax, %ds
	movw %ax, %es
	movw %ax, %fs
	movw %ax, %gs
	movw %ax, %ss
+
	movl %ebx, %esi // Save the multiboot pointer somewhere
	// it won't be clobbered by CPUID

	// This gives 256 bytes to Threads::Thread; if it gets larger
	// this needs to be updated (as well as the code at high_vaddr).

	movl $x64_init_stack + 3840 - KERNEL_START, %esp

	// Test for CPUID: the ID bit (21) of EFLAGS is toggleable
	// iff the CPUID instruction is supported.
	pushfl
	popl %eax
	movl %eax, %ebx
	xorl $0x00200000, %eax
	pushl %eax
	popfl
	pushfl
	popl %eax
	cmpl %eax, %ebx
	je no_long_mode

	// Test for long mode
	movl $0x80000000, %eax
	cpuid
	cmpl $0x80000000, %eax
	jbe no_long_mode // no extended CPUID leaves at all
	movl $0x80000001, %eax
	cpuid
	btl $29, %edx // EDX bit 29 = long mode available
	jnc no_long_mode

	movl $0xc0000080, %ecx // Extended Feature Enable Register (EFER)
	xorl %edx, %edx
	movl $0x100, %eax // Enable long mode
	wrmsr

	// enable PAE
	movl %cr4, %eax
	btsl $5, %eax
	movl %eax, %cr4

	// Set page table attributes
	orl $7, x64_init_ptbl_l4 - KERNEL_START
	orl $7, x64_init_ptbl_l3 - KERNEL_START
//	orl $7, x64_init_ptbl_l2 - KERNEL_START

	// Load the initial page table
	movl $x64_init_ptbl_l4 - KERNEL_START, %eax
	movl %eax, %cr3

	// enable paging, kernel write-protect,
	// and internal floating point error handling
	// (0x80010020 = CR0.PG | CR0.WP | CR0.NE)
	movl %cr0, %eax
	orl $0x80010020, %eax
	movl %eax, %cr0
	// Jump through the 64-bit code selector (0x18) to enter long mode.
	ljmp $0x18, $in_code64 - KERNEL_START
+
	.code64
in_code64:
	// Set up high page tables for 0xffffffff80000000 mapping,
	// reusing the tables previously used for the low identity
	// mapping.

	movq x64_init_ptbl_l4 - KERNEL_START, %rax
	movq %rax, x64_init_ptbl_l4 - KERNEL_START + 0xff8

	movq x64_init_ptbl_l3 - KERNEL_START, %rax
	movq %rax, x64_init_ptbl_l3 - KERNEL_START + 0xff0

	// Switch to the virtual-address GDT pointer and stash the saved
	// multiboot info pointer for C++ code.
	lgdt x64_gdtr + 6
	movl %esi, x64_boot_info_phys

	// Indirect jump: a direct jump would be RIP-relative and stay
	// in the low mapping.
	movq $high_vaddr, %rax
	jmp *%rax

high_vaddr:
	movq $x64_init_stack + 3840, %rsp
	jmp start_kernel

	.code32
no_multiboot:
	movl $no_multiboot_str - KERNEL_START, %esi
	jmp do_print
no_long_mode:
	movl $no_long_mode_str - KERNEL_START, %esi
do_print:
	// Write the NUL-terminated string at %esi directly to the VGA
	// text buffer, then hang.
	movl $0xb8000, %edi

	movb (%esi), %al
	xorl %ecx, %ecx
1:	movb %al, (%edi, %ecx, 2)
	movb $14, 1(%edi, %ecx, 2) // It's not at the cursor, so use
	// yellow to make it stand out.
	incl %ecx
	movb (%esi, %ecx), %al
	testb %al, %al
	jnz 1b

2:	jmp 2b

no_long_mode_str:
	.string "This CPU does not support long (64-bit) mode.  Use a 32-bit kernel."

no_multiboot_str:
	.string "Unrecognized bootloader; a multiboot-compliant loader is required."
+
+	.code64
+
+	// Save the caller-clobbered (volatile) registers of the SysV
+	// AMD64 ABI, except %rdi, which the per-vector IRQ stubs have
+	// already pushed by the time they reach common code.
+	.macro pushvolatilesnordi
+	push %rax
+	push %rcx
+	push %rdx
+	push %rsi
+	push %r8
+	push %r9
+	push %r10
+	push %r11
+	.endm
+
+	// Save all volatile registers, including %rdi.  This must
+	// mirror popvolatiles below, whose final instruction is
+	// "pop %rdi": the first (deepest) register pushed here must be
+	// %rdi.  The old code pushed %rax twice, so popvolatiles would
+	// have restored %rdi with %rax's value.
+	.macro pushvolatiles
+	push %rdi
+	pushvolatilesnordi
+	.endm
+
+	// Restore what pushvolatiles (or an IRQ stub's "push %rdi"
+	// plus pushvolatilesnordi) saved, in reverse order.
+	.macro popvolatiles
+	pop %r11
+	pop %r10
+	pop %r9
+	pop %r8
+	pop %rsi
+	pop %rdx
+	pop %rcx
+	pop %rax
+	pop %rdi
+	.endm
+
+	// Non-volatile registers must be pushed if the handler will
+	// need to access all of the interrupted code's registers,
+	// such as when producing an error dump.  Does not include
+	// r15, as that is usually swapped with the error code.
+
+	// Resulting layout (u64 slots from %rsp upward): rax, rcx,
+	// rdx, rbx, rsp, rbp, rsi, rdi, r8-r14, then r15/error code,
+	// then the hardware exception frame.
+	.macro pushall
+	push %r14
+	push %r13
+	push %r12
+	push %r11
+	push %r10
+	push %r9
+	push %r8
+	push %rdi
+	push %rsi
+	push %rbp
+	push %rsp
+	push %rbx
+	push %rdx
+	push %rcx
+	push %rax
+	.endm
+
+	// As pushall, but also saves r15 -- for exceptions that do not
+	// push a hardware error code.
+	.macro pushallnoerr
+	push %r15
+	pushall
+	.endm
+
+	// Undo pushall/pushallnoerr.  The saved-rsp slot is skipped
+	// with addq; the real rsp is restored by iretq.
+	.macro popall
+	pop %rax
+	pop %rcx
+	pop %rdx
+	pop %rbx
+	addq $8, %rsp
+	pop %rbp
+	pop %rsi
+	pop %rdi
+	pop %r8
+	pop %r9
+	pop %r10
+	pop %r11
+	pop %r12
+	pop %r13
+	pop %r14
+	pop %r15
+	.endm
+
+	// Divide-error (#DE) entry; no hardware error code.
+	.global x64_diverr
+x64_diverr:
+	pushallnoerr
+
+	movq %rsp, %rdi		// arg 1: saved register block
+	call x64_do_diverr
+
+	popall
+	iretq
+
+	// Invalid-opcode (#UD) entry; no hardware error code.
+	.global x64_invalid_insn
+x64_invalid_insn:
+	pushallnoerr
+
+	movq %rsp, %rdi		// arg 1: saved register block
+	call x64_do_invalid_insn
+
+	popall
+	iretq
+
+	// General-protection (#GP) entry.  The CPU pushed an error
+	// code into the slot where r15 would be saved; swap it into
+	// r15 so the pushall layout still holds (popall later restores
+	// the old r15 from that same slot).
+	.global x64_gpf
+x64_gpf:
+	xchgq %r15, (%rsp)	// get error code
+	pushall
+
+	movq %rsp, %rdi		// arg 1: saved register block
+	movq %r15, %rsi		// arg 2: error code
+	call x64_do_gpf
+
+	popall
+	iretq
+
+	// Page-fault (#PF) entry; error code handled as in x64_gpf.
+	.global x64_page_fault
+x64_page_fault:
+	xchgq %r15, (%rsp)	// get error code
+	pushall
+
+	movq %rsp, %rdi		// arg 1: saved register block
+	movq %cr2, %rsi		// arg 2: faulting address
+	movq %r15, %rdx		// arg 3: error code
+	call x64_do_page_fault
+
+	popall
+	iretq
+
+	// Common IRQ path.  The per-vector stubs (see the irq macro
+	// below) have already pushed %rdi and loaded the vector number
+	// into it, so only the remaining volatiles are saved here.
+	.global x64_irq
+x64_irq:
+	pushvolatilesnordi
+	subq $8, %rsp		// Keep the stack frame 16-byte aligned
+
+	call x64_do_irq
+	movl need_resched, %eax
+	testl %eax, %eax
+	jnz x64_reschedule
+
+x64_ret_irq:
+	addq $8, %rsp
+	popvolatiles
+	iretq
+
+x64_reschedule:
+	// The cli is to make sure interrupts don't get re-enabled in
+	// this thread context between the schedule and the ret from
+	// IRQ.
+
+	cli
+	call schedule
+	jmp x64_ret_irq
+
+	// The ".irqs" section collects a table of pointers to the
+	// generated interrupt stubs; x64_irqs labels its start (the
+	// linker script keeps the section contiguous).
+	.section ".irqs","x"
+	.global x64_irqs
+x64_irqs:
+	.text
+
+	// Emit one stub per vector from \from to \to inclusive.  Each
+	// stub saves %rdi, loads its vector number into %rdi, and jumps
+	// to the common x64_irq handler; the stub's address is recorded
+	// in the .irqs table.  Recursion is used so each stub gets its
+	// own \from value.
+	.macro irq from,to
+1:	push %rdi
+	movq $\from, %rdi
+	jmp x64_irq
+	.section ".irqs","x"
+	.quad 1b
+	.text
+	.if \to-\from
+	irq (\from+1),\to
+	.endif
+	.endm
+
+	// Expand irq in chunks of 16 to keep assembler macro recursion
+	// depth manageable.
+	.macro irq16 from,to
+	irq \from,(\from+15)
+	.if \to-\from
+	irq16 (\from+16),\to
+	.endif
+	.endm
+
+	// Stubs for all 256 vectors (block starts 0, 16, ..., 240).
+	irq16 0,240
+
+	// First entry into a newly created thread, reached from
+	// switch_thread when the destination's jump_to_init flag is
+	// set.  The new stack holds (entry, arg) as laid out by
+	// ArchThread::init in misc.cc.
+	.global x64_new_thread
+x64_new_thread:
+	// Zero all GPRs so no state leaks from the previous thread.
+	xorq %rax, %rax
+	xorq %rbx, %rbx
+	xorq %rcx, %rcx
+	xorq %rdx, %rdx
+	xorq %rbp, %rbp
+	xorq %r8, %r8
+	xorq %r9, %r9
+	xorq %r10, %r10
+	xorq %r11, %r11
+	xorq %r12, %r12
+	xorq %r13, %r13
+	xorq %r14, %r14
+	xorq %r15, %r15
+
+	call sched_new_thread
+	pop %rsi		// entry point
+	pop %rdi		// argument for the entry point
+	call *%rsi
+	call exit_thread	// entry returned: terminate the thread
+	ud2a			// exit_thread must never return
--- /dev/null
+/* Linker script for the x64 kernel.  The image is linked at
+   0xffffffff80200000 (the high kernel mapping); entry.S runs from the
+   corresponding low physical copy at 0x00200000 until paging is up. */
+ENTRY(_start)
+PHDRS
+{
+	all PT_LOAD FILEHDR PHDRS AT (0xffffffff80200000);
+}
+SECTIONS
+{
+	. = 0xffffffff80200000 + SIZEOF_HEADERS;
+	.text : {
+		*(.text) *(.gnu.linkonce.t.*)
+		*(.rodata*) *(.gnu.linkonce.r.*)
+		*(.eh_frame) *(.rela.eh_frame) *(.gcc_except_table)
+		/* ctors..ctors_end: global constructor table,
+		   NULL-terminated by the QUAD(0). */
+		ctors = .;
+		*(.ctors)
+		QUAD(0)
+		ctors_end = .;
+		/* Table of IRQ stub addresses emitted by entry.S. */
+		*(.irqs)
+	} :all
+	/* Gap so text and data don't share a page. */
+	. = . + 0x1000;
+	data_start = .;
+	.data : { *(.data) *(.gnu.linkonce.d.*) } :all
+	bss_start = .;
+	.bss : { *(.bss) } :all
+	bss_end = .;
+	_end = .;
+}
--- /dev/null
+// arch/x64/mem.cc -- x64 misc. memory management
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/kernel.h>
+#include <kern/mem.h>
+#include <kern/pagealloc.h>
+#include <kern/libc.h>
+
+// Initial page tables have the first 4 MiB mapped, using large pages.
+
+// L2 table: two 2MiB mappings of physical 0x000000 and 0x200000.
+// 0x87 = present | writable | user | page-size (large page).
+__attribute__((aligned(4096))) u64 x64_init_ptbl_l2[512] = {
+	0x87,
+	0x00200087,
+};
+
+// The ORing of 7 into these entries will be done in entry.S;
+// doing it here causes the compiler to emit runtime code
+// to do it.
+
+// L3 (PDPT): entry 0 points at the L2 table (physical address).
+__attribute__((aligned(4096))) u64 x64_init_ptbl_l3[512] = {
+	reinterpret_cast<u64>(x64_init_ptbl_l2) - KERNEL_START
+};
+
+// L4 (PML4): entry 0 points at the L3 table (physical address).
+__attribute__((aligned(4096))) u64 x64_init_ptbl_l4[512] = {
+	reinterpret_cast<u64>(x64_init_ptbl_l3) - KERNEL_START
+};
+
+// Provided by the linker script: the end of the kernel image.
+extern int _end;
+
+using Mem::PageAllocZone;
+
+namespace Arch {
+	// Number of page frames of usable RAM; set by
+	// MultiBoot::process_info().
+	size_t mem_end;
+
+	PageAllocZone *pagezones[3];
+
+	namespace Priv {
+		// Three physical zones: ISA DMA, 32-bit DMA, and high
+		// memory (boundaries come from dma32zonestart /
+		// highzonestart in the arch headers).
+		PageAllocZone isadmazone, dma32zone, highzone;
+
+		// NULL-terminated fallback lists, most-preferred first;
+		// dma32zonelist and normalzonelist are filled in by
+		// map_physmem() according to how much RAM exists.
+		PageAllocZone *isadmazonelist[2] = { &isadmazone, NULL };
+		PageAllocZone *dma32zonelist[3];
+		PageAllocZone *normalzonelist[4];
+	}
+
+	PageAllocZone **pagezonelists[3] = { Priv::normalzonelist,
+	                                     Priv::dma32zonelist,
+	                                     Priv::isadmazonelist };
+
+	// First address available for early (bootmem) allocations:
+	// just past the kernel image, in the PHYSMEM_START mapping.
+	uintptr_t next_free_bootmem = reinterpret_cast<uintptr_t>(&_end) -
+	                              KERNEL_START + PHYSMEM_START;
+
+	namespace Priv {
+		// Drop the boot-time identity mapping, mark the kernel-text
+		// mapping global, and create the first tables of the linear
+		// physical-memory (PHYSMEM_START) mapping.
+		void early_adjust_mappings()
+		{
+			using Mem::get_bootmem;
+
+			// Clear low-address mappings and invalidate TLB
+			x64_init_ptbl_l4[0] = 0;
+			x64_init_ptbl_l3[0] = 0;
+			asm volatile("movq %0, %%cr3" : : "r" (kvirt_to_phys(x64_init_ptbl_l4)));
+
+			// Mark the ktext mapping global now that it's not mapped at address
+			// zero.
+
+			x64_init_ptbl_l2[0] |= 0x100;
+			x64_init_ptbl_l2[1] |= 0x100;
+
+			// Allocate and clear an L3 table for the physmem mapping
+			// (L4 slot 0x100) and an L2 table beneath it.
+			u64 l3phys = kvirt_to_phys(get_bootmem(page_size, page_size));
+			u64 *l3 = static_cast<u64 *>(phys_to_ktext(l3phys));
+			bzero(l3, page_size);
+			x64_init_ptbl_l4[0x100] = l3phys | 7;
+
+			u64 l2phys = kvirt_to_phys(get_bootmem(page_size, page_size));
+			u64 *l2 = static_cast<u64 *>(phys_to_ktext(l2phys));
+			bzero(l2, page_size);
+			l3[0] = l2phys | 7;
+
+			// Map at least as much as is mapped in ktext, so that
+			// things like the VGA driver can use it early without
+			// having to know about phys_to_ktext, and so map_physmem
+			// doesn't need to special-case the use of phys_to_ktext
+			// for the first couple pages.
+
+			l2[0] = 0x187;
+			l2[1] = 0x00200187;
+		}
+
+		// Build the full linear physical-memory mapping at
+		// PHYSMEM_START with 2MiB pages, allocate the Mem::Page
+		// array, and register the page-allocator zones.  Requires
+		// mem_end (in 4KiB pages) to have been set first.
+		void map_physmem()
+		{
+			using Mem::get_bootmem;
+
+			// phys_to_ktext can be used for the first
+			// 2MiB-minus-size-of-kernel of bootmem allocations.
+
+			// Map each 2MiB region; regions 0 and 1 were already
+			// mapped by early_adjust_mappings.  mem_end is in 4KiB
+			// pages, so /512 converts it to 2MiB regions.
+			for (uintptr_t physpage = 2; physpage <= (mem_end - 1) / 512; physpage++)
+			{
+				uintptr_t virtpage = physpage + (PHYSMEM_START >> 21);
+
+				u64 *l3;
+				u64 l3phys = x64_init_ptbl_l4[(virtpage >> 18) & 511] &
+				             ~(page_size - 1);
+
+				if (!l3phys) {
+					// No L3 table covers this range yet; make one.
+					l3 = static_cast<u64 *>(get_bootmem(page_size, page_size));
+					bzero(l3, page_size);
+					x64_init_ptbl_l4[(virtpage >> 18) & 511] =
+						kvirt_to_phys(l3) | 7;
+				} else {
+					l3 = static_cast<u64 *>(phys_to_kvirt(l3phys));
+				}
+
+				u64 *l2;
+				u64 l2phys = l3[(virtpage >> 9) & 511] & ~(page_size - 1);
+
+				if (!l2phys) {
+					// Likewise for the L2 table.
+					l2 = static_cast<u64 *>(get_bootmem(page_size, page_size));
+					bzero(l2, page_size);
+					l3[(virtpage >> 9) & 511] = kvirt_to_phys(l2) | 7;
+				} else {
+					l2 = static_cast<u64 *>(phys_to_kvirt(l2phys));
+				}
+
+				// 0x187 = global | large page | user/write/present.
+				l2[virtpage & 511] = (physpage << 21) | 0x187;
+			}
+
+			// One Mem::Page struct per page frame.
+			size_t pages_size = mem_end * sizeof(Mem::Page);
+			Mem::pages = static_cast<Mem::Page *>(get_bootmem(pages_size, 8));
+
+			// last_page points at the final element of the array.
+			// BUGFIX: the old code added pages_size (a *byte* count)
+			// to the Page pointer, which C++ scales by sizeof(Page)
+			// and thus pointed far past the array; the element count
+			// is mem_end.
+			Mem::last_page = Mem::pages + mem_end - 1;
+			bzero(Mem::pages, pages_size);
+
+			// Register only the zones that actually contain RAM,
+			// and set up the fallback lists (normal -> high, dma32,
+			// isadma; dma32 -> dma32, isadma).
+			int normal = 0, dma = 0;
+			uintptr_t highstart = highzonestart;
+			uintptr_t dma32start = dma32zonestart;
+
+			if (mem_end > highstart) {
+				normalzonelist[normal++] = &highzone;
+				highzone.init(highstart, mem_end - highstart);
+			} else {
+				highstart = mem_end;
+			}
+
+			if (mem_end > dma32start) {
+				normalzonelist[normal++] = &dma32zone;
+				dma32zonelist[dma++] = &dma32zone;
+				dma32zone.init(dma32start, highstart - dma32start);
+			} else {
+				dma32start = mem_end;
+			}
+
+			normalzonelist[normal++] = &isadmazone;
+			dma32zonelist[dma++] = &isadmazone;
+
+			isadmazone.init(mem_start, dma32start);
+		}
+	}
+}
--- /dev/null
+// arch/x64/misc.cc -- Misc. arch-specific stuff
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/types.h>
+#include <kern/libc.h>
+#include <kern/arch.h>
+#include <kern/i8259.h>
+#include <kern/time.h>
+#include <kern/thread.h>
+#include <kern/mem.h>
+
+#include <arch/addrs.h>
+#include <arch/multiboot.h>
+
+// Initial L4 page table, defined in arch/x64/mem.cc.
+extern u64 x64_init_ptbl_l4[512];
+
+// The boot stack shares a single 4KiB-aligned page with the init
+// thread's Threads::Thread structure, which occupies the top of the
+// page (entry.S sets %rsp accordingly).
+struct X64InitStack {
+	u8 stack[4096 - ::Threads::thread_size];
+	::Threads::Thread thread;
+} __attribute__((aligned(4096))) x64_init_stack;
+
+namespace Arch {
+	namespace Priv {
+		void set_idt();
+
+		// Dump the saved register block and a stack trace.
+		// "stack" points at the block saved by the pushall macro
+		// in entry.S: slots 0-14 are rax, rcx, rdx, rbx, rsp, rbp,
+		// rsi, rdi, r8-r14; slot 15 is r15; slots 16-19 are the
+		// exception frame (rip, cs, rflags, original rsp).
+		void show_regs(u64 *stack) {
+			// Print the 16 saved GPR slots, four per line,
+			// labelled by slot index (not register name).
+			for (int i = 0; i < 16; i += 4)
+				printf("r%02d 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+				       i, stack[i], stack[i + 1], stack[i + 2], stack[i + 3]);
+
+			printf("orig rsp: 0x%016llx rflags: 0x%016llx\n",
+			       stack[19], stack[18]);
+
+			printf("Thread %p (%s)\n", curthread, curthread->name);
+
+			printf("Stack trace: ");
+			// Frame-pointer walk: slot 5 is the interrupted rbp;
+			// each frame is { caller's rbp, return address }.
+			u64 *frame = (u64 *)stack[5];
+
+			for (int i = 1; i < 32; i++) {
+				u64 stackptr = frame[1];
+				frame = (u64 *)frame[0];
+
+				// Stop once the chain leaves the kernel half of
+				// the canonical address space.
+				if ((u64)frame < 0xffff800000000000)
+					break;
+
+				if (!(i & 3))
+					printf("\n");
+
+				printf("0x%016llx ", stackptr);
+			}
+		}
+
+		// IRQ 0 (timer tick): run the monotonic timer queue.
+		struct TimerInt : public IRQ::Interrupt {
+			bool action()
+			{
+				Time::monotonic_timers->run();
+				return true;	// interrupt was handled
+			}
+		};
+
+		TimerInt timer_int;
+	}
+
+	using IRQ::i8259;
+	::Threads::Thread *init_thread;
+
+	// One-time arch setup: fix up boot mappings, install the IDT,
+	// parse multiboot data, program the PICs, and install the TSS.
+	void arch_init()
+	{
+		init_thread = &x64_init_stack.thread;
+		Priv::early_adjust_mappings();
+		Priv::set_idt();
+		Priv::MultiBoot::process_info();
+		i8259.init();
+
+		// Patch the TSS base into GDT entry 4 (selector 0x20).
+		// NOTE(review): the +4 presumably skips a leading pad
+		// field in the TSS declaration -- confirm against the
+		// TSS struct definition.
+		u64 tss_addr = reinterpret_cast<u64>(&Priv::tss) + 4;
+		x64_gdt[4].base_low = tss_addr & 0xffff;
+		x64_gdt[4].base_mid = (tss_addr & 0xff0000) >> 16;
+		x64_gdt[4].base_high = (tss_addr & 0xff000000) >> 24;
+
+		asm volatile("ltr %w0" : : "r" (0x20) : "memory");
+		init_thread->addr_space = new Mem::AddrSpace(x64_init_ptbl_l4);
+		init_thread->active_addr_space = init_thread->addr_space;
+	}
+
+	// Hook IRQ 0 to the timer interrupt handler.
+	void timer_init()
+	{
+		IRQ::InterruptSlot *timer = i8259.get_slot(0);
+		i8259.request_int(timer, &Priv::timer_int);
+	}
+
+	// Prepare a new thread's stack so that x64_new_thread (entry.S)
+	// pops "entry" into %rsi and "arg" into %rdi on first switch.
+	// "this" is the top of the new thread's stack region.
+	void ArchThread::init(void *entry, void *arg)
+	{
+		void **stack = reinterpret_cast<void **>(this);
+
+		*--stack = arg;
+		*--stack = entry;
+
+		rsp = stack;
+		rbp = 0;
+		jump_to_init = 1;	// switch_thread enters x64_new_thread
+	}
+}
+
+using Arch::Priv::show_regs;
+
+// Divide-error (#DE) handler, called from x64_diverr in entry.S.
+// stack[17]:stack[16] are the saved cs:rip.
+extern "C" void x64_do_diverr(u64 *stack)
+{
+	printf("Division error at 0x%04llx:0x%016llx\n", stack[17], stack[16]);
+	show_regs(stack);
+	for(;;);	// no recovery: halt here
+}
+
+// Invalid-opcode (#UD) handler, called from x64_invalid_insn.
+extern "C" void x64_do_invalid_insn(u64 *stack)
+{
+	printf("Invalid instruction at 0x%04llx:0x%016llx\n", stack[17], stack[16]);
+	show_regs(stack);
+	for(;;);	// no recovery: halt here
+}
+
+// Page-fault (#PF) handler.  "stack" is the saved register block,
+// "fault_addr" is CR2, "error_code" is the hardware code (bit 1:
+// write, bit 2: user mode, bit 3: reserved bit set, bit 4: ifetch).
+extern "C" void x64_do_page_fault(u64 *stack, u64 fault_addr, u32 error_code)
+{
+	Mem::AddrSpace *as;
+
+	// A fault while dumping a fault: just hang.
+	if (in_fault)
+		for(;;);
+
+	// A reserved bit was set in the PTE; this is always a bug.
+	if (error_code & 8)
+		goto bad_fault;
+
+	// Don't try to fix up a page fault if interrupts were disabled.  It is an
+	// error to access non-locked pages with interrupts disabled.  Trying to
+	// fix it up in the case of an access that would be legitimate if interrupts
+	// were enabled would simply mask the loss of atomicity, and trying to grab
+	// locks to look up the address if it is a completely bad reference won't
+	// accomplish much other than decreasing the odds that the fault message
+	// gets out.
+
+	// stack[18] is the saved rflags; 0x200 is the IF bit.
+	if (!(stack[18] & 0x200))
+		goto bad_fault;
+
+	// Don't allow fault-ins using a borrowed addr-space.
+	as = curthread->addr_space;
+
+	if (!as || curthread == Arch::init_thread)
+		goto bad_fault;
+
+	ll_ints_on();
+
+	// FIXME: no-exec
+	if (as->handle_fault(fault_addr, error_code & 2,
+	                     false /* error_code & 16 */, error_code & 4))
+		return;
+
+	// FIXME: throw exception to user
+
+bad_fault:
+	ll_ints_off();
+	in_fault++;
+
+	printf("Page fault at 0x%04llx:0x%016llx for 0x%016llx\n",
+	       stack[17], stack[16], fault_addr);
+	printf("Error code: 0x%04x\n", error_code);
+
+	show_regs(stack);
+
+	for(;;);
+}
+
+// General-protection-fault (#GP) handler; error_code is the
+// hardware-pushed error code (often a selector index).
+extern "C" void x64_do_gpf(u64 *stack, u32 error_code)
+{
+	// A fault while dumping a fault: just hang.
+	if (in_fault)
+		for(;;);
+
+	in_fault++;
+
+	printf("General protection fault at 0x%04llx:0x%016llx, "
+	       "Error code: 0x%04x\n",
+	       stack[17], stack[16], error_code);
+
+	show_regs(stack);
+
+	for(;;);
+}
+
+// Common IRQ dispatch, called from the entry.S stubs with the raw
+// vector number; the PICs are programmed at base vector 0x20.
+extern "C" void x64_do_irq(int irq)
+{
+	IRQ::i8259.handle_irq(irq - 0x20);
+}
--- /dev/null
+// arch/x64/multiboot.cc -- Code to interpret multiboot data and initialize
+// free memory regions based thereupon.
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/libc.h>
+#include <kern/pagealloc.h>
+
+#include <arch/multiboot.h>
+#include <arch/addrs.h>
+#include <arch/paging.h>
+#include <arch/mem.h>
+
+#include <limits.h>
+
+// Physical address of the multiboot info block, stored by entry.S
+// before the kernel's page tables were loaded.
+u32 x64_boot_info_phys;
+
+namespace Arch {
+namespace Priv {
+namespace MultiBoot {
+	// Kernel-visible pointer to the multiboot info block.
+	BootInfo *boot_info;
+
+	// Placeholder; the memory map is currently parsed inline in
+	// process_info().
+	void parse_mmap()
+	{
+	}
+
+	// Release the page-frame range [page_start, page_end) to the
+	// allocator, splitting it across the ISA-DMA, DMA32, and high
+	// zones at dma32zonestart and highzonestart.
+	void make_mem_avail(uintptr_t page_start, uintptr_t page_end)
+	{
+		using Mem::pages;
+
+		// Portion below dma32zonestart belongs to the ISA DMA zone.
+		if (page_start < dma32zonestart) {
+			size_t len;
+
+			if (page_end <= dma32zonestart)
+				len = page_end - page_start;
+			else
+				len = dma32zonestart - page_start;
+
+			isadmazone.free(&pages[page_start], len);
+			page_start = dma32zonestart;
+		}
+
+		// Portion in [dma32zonestart, highzonestart) belongs to the
+		// DMA32 zone.
+		if (page_start < highzonestart && page_end > dma32zonestart) {
+			size_t len;
+
+			if (page_end <= highzonestart)
+				len = page_end - page_start;
+			else
+				len = highzonestart - page_start;
+
+			dma32zone.free(&pages[page_start], len);
+			page_start = highzonestart;
+		}
+
+		// Anything left belongs to the high zone.  BUGFIX: the old
+		// code freed this portion into dma32zone again, and used
+		// length page_end - highzonestart, which over-counts when
+		// the range started above highzonestart.
+		if (page_end > highzonestart)
+			highzone.free(&pages[page_start], page_end - page_start);
+	}
+
+	// Parse the multiboot info block: print and scan the BIOS
+	// memory map, set Arch::mem_end, build the physical-memory
+	// mapping, and hand all usable page frames to the allocator.
+	void process_info()
+	{
+		// The info block must be reachable through the fixed ktext
+		// mapping to be read this early.
+		if (x64_boot_info_phys > max_ktext_map) {
+			printf("Cannot access boot info at %#.8x\n", x64_boot_info_phys);
+			for(;;);
+		}
+
+		boot_info = (BootInfo *)phys_to_ktext(x64_boot_info_phys);
+		mem_end = 0;
+
+		if (!(boot_info->flags & (1 << BootInfo::flag_mmap))) {
+			// FIXME: use mem_lower and mem_upper in this case.
+			printf("MultiBoot info does not contain a memory map.\n");
+			for(;;);
+		}
+
+		printf("BIOS Memory Map:\n");
+
+		// First pass: display the map and find the highest usable
+		// page frame number (mem_end).
+		uint off = 0;
+		while (off < boot_info->mmap_length) {
+			u32 phys = boot_info->mmap_addr + off;
+
+			if (phys > max_ktext_map) {
+				printf("Cannot access BIOS memory map entry at %#.8x\n", phys);
+				for(;;);
+			}
+
+			MemMap *mmap = (MemMap *)phys_to_ktext(phys);
+
+			printf("0x%016llx - 0x%016llx, type %d\n",
+			       mmap->base, mmap->base + mmap->len - 1, mmap->type);
+
+			// The multiboot entry's "size" field excludes the size
+			// field itself, hence the +4.
+			off += mmap->size + 4;
+
+			if (mmap->type == MemMap::Available) {
+				size_t page_end = (mmap->base + mmap->len) / page_size;
+
+				if (page_end > mem_end)
+					mem_end = page_end;
+			}
+		}
+
+		map_physmem();
+
+		// Second pass: free each usable region to the allocator,
+		// carving out the kernel image and bootmem allocations.
+		off = 0;
+		while (off < boot_info->mmap_length) {
+			u32 phys = boot_info->mmap_addr + off;
+			MemMap *mmap = (MemMap *)phys_to_ktext(phys);
+			off += mmap->size + 4;
+
+			if (mmap->type == MemMap::Available) {
+				// Don't use any page that isn't fully in this entry.
+
+				size_t page_start = (mmap->base + page_size - 1) / page_size;
+				size_t page_end = (mmap->base + mmap->len) / page_size;
+
+
+				// Don't make available any pages that overlap the
+				// kernel text/data or bootmem allocations.
+
+				uintptr_t kernelendpage =
+					(kvirt_to_phys((void *)next_free_bootmem) + page_size - 1) /
+					page_size;
+
+				// Don't overwrite page 1; the BIOS will need it for later calls in
+				// emulation, and at least the BIOS I'm testing on doesn't mark
+				// it as reserved.
+
+				if (page_start == 0)
+					page_start = 1;
+				if (page_start >= page_end)
+					continue;
+
+				// The kernel is loaded at 2MiB = page frame 512; a
+				// region overlapping [512, kernelendpage] is split
+				// around that reserved range.
+				if (page_start <= kernelendpage && page_end >= 512) {
+					if (page_start < 512)
+						make_mem_avail(page_start, 512);
+
+					if (page_end > kernelendpage)
+						make_mem_avail(kernelendpage + 1, page_end);
+				} else {
+					make_mem_avail(page_start, page_end);
+				}
+			}
+		}
+	}
+}
+}
+}
--- /dev/null
+// arch/x64/thread.cc -- Thread switching
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/thread.h>
+#include <kern/mem.h>
+#include <kern/pagetable.h>
+
+namespace Arch {
+	// Switch the CPU from "src" (the current thread) to "dest".
+	// Saves src's rsp/rbp, switches address spaces if needed, and
+	// resumes dest -- either where it last called switch_thread, or
+	// via x64_new_thread if dest has never run (jump_to_init set).
+	void switch_thread(Threads::Thread *dest, Threads::Thread *src)
+	{
+		u64 dummy1, dummy2;
+
+		// If dest owns an address space, make it active (skipping
+		// the CR3 reload if it already is); otherwise dest borrows
+		// whatever address space src was running in.
+		if (dest->addr_space) {
+			assert(dest->addr_space == dest->active_addr_space);
+
+			if (dest->addr_space != src->active_addr_space) {
+				u64 cr3 = Mem::kvirt_to_phys(dest->addr_space->
+				                             page_table->toplevel);
+				asm volatile("movq %0, %%cr3" : : "r" (cr3) : "memory");
+			}
+		} else {
+			dest->active_addr_space = src->active_addr_space;
+		}
+
+		// Kernel stack for ring transitions: the Thread sits at the
+		// top of its stack, so its address is the stack top.
+		Priv::tss.rsp[0] = reinterpret_cast<u64>(dest);
+
+		// Offsets into ArchThread (see ArchThread::init in misc.cc):
+		// 0 = rsp, 8 = rbp, 16 = jump_to_init.  Save src's rsp/rbp
+		// and clear its flag; load dest's rsp/rbp; if dest's flag
+		// was set (cmpb result still in ZF), enter x64_new_thread
+		// instead of returning normally.
+		asm volatile("movq %%rsp, (%0);"
+		             "movq %%rbp, 8(%0);"
+		             "movb $0, 16(%0);"
+		             "movb 16(%1), %%al;"
+		             "cmpb $0, %%al;"
+		             "movq (%1), %%rsp;"
+		             "movq 8(%1), %%rbp;"
+		             "jnz x64_new_thread;" :
+		             "=a" (dummy1), "=c" (dummy2) :
+		             "0" (&src->arch.rsp), "1" (&dest->arch.rsp) :
+		             "rbx", "rdx", "rsi", "rdi",
+		             "r8", "r9", "r10", "r11", "r12",
+		             "r13", "r14", "r15", "memory");
+	}
+}
--- /dev/null
+# arch/x86-common/ -- sources shared between the x86 and x64 ports.
+DIR := arch/x86-common/
+DIRS += $(DIR)
+
+RAW_CXXFILES := local-apic
+CXXFILES += $(addprefix $(DIR),$(RAW_CXXFILES))
--- /dev/null
+# arch/x86/ -- 32-bit x86 kernel sources.
+DIR := arch/x86/
+DIRS += $(DIR)
+
+RAW_ASFILES := entry
+RAW_CXXFILES := descriptors misc multiboot mem thread
+
+ASFILES += $(addprefix $(DIR),$(RAW_ASFILES))
+CXXFILES += $(addprefix $(DIR),$(RAW_CXXFILES))
+
+# Pull in the sources shared with x64 (note: this resets DIR).
+include arch/x86-common/Makefile
--- /dev/null
+DIR := arch/x86/
+CXXFLAGS += -fno-omit-frame-pointer -march=i686
+
+# Link the kernel with the arch linker script.  $(OBJS), $(CXX), etc.
+# come from the including kernel Makefile.  Automatic variables
+# ($@/$<) are used so the recipes can't drift from the rule header.
+$(BUILDDIR)/kernel: $(OBJS) $(DIR)linker-script
+	@echo $(COMP): Linking kernel: $@
+	@$(MKDIR) $(dir $@)
+	@$(CXX) $(OBJS) -lgcc -lsupc++ -nostdlib -o "$@" -Wl,-T$(DIR)linker-script
+
+# GRUB refuses to use the addresses in the multiboot header if it
+# finds a valid ELF header, so the dd hacks a zero into the high byte
+# of the physical address field.  I tried using AT() in the linker
+# script to properly generate separate physical and virtual
+# addresses, but that caused the strip command to issue a warning and
+# wipe out the BSS (setting memsz to filesz).
+
+$(BUILDDIR)/kernel.stripped: $(BUILDDIR)/kernel
+	@echo $(COMP): Stripping kernel: $@
+	@$(STRIP) $< -o "$@"
+	@dd if=/dev/zero of="$@" conv=notrunc \
+	   bs=1 count=1 seek=67
--- /dev/null
+// arch/x86/descriptors.cc -- code to manage segment/trap/interrupt descriptors
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/types.h>
+#include <arch/addrs.h>
+#include <arch/thread.h>
+#include <arch/mem.h>
+
+namespace Arch {
+namespace Priv {
+	// The kernel's task state segment; its base address is patched
+	// into the GDT at runtime (the descriptor below has base 0).
+	TSS tss;
+}
+}
+
+using Arch::Priv::Descriptor;
+using Arch::Priv::TSS;
+using Arch::Priv::tss;
+
+// Global descriptor table.  Selectors: 0x00 null, 0x08 flat data,
+// 0x10 flat code, 0x18 TSS; remaining entries are zero.  Uses GCC's
+// labelled-initializer extension.
+Descriptor x86_gdt[1024] = {
+	{}, // The first entry is reserved for the NULL selector.
+	{ // 0x08: data (flat 4GiB, ring 0)
+		limit_low: 0xffff,
+		base_low: 0,
+		base_mid: 0,
+		type: 2, // data segment, writable
+		user: 1,
+		dpl: 0,
+		present: 1,
+		limit_high: 0xff,
+		sw: 0,
+		reserved: 0,
+		opsize: 1,
+		gran: 1, // limit in 4KiB units -> 4GiB
+		base_high: 0
+	},
+	{ // 0x10: code (flat 4GiB, ring 0)
+		limit_low: 0xffff,
+		base_low: 0,
+		base_mid: 0,
+		type: 10, // code segment, readable
+		user: 1,
+		dpl: 0,
+		present: 1,
+		limit_high: 0xff,
+		sw: 0,
+		reserved: 0,
+		opsize: 1,
+		gran: 1,
+		base_high: 0
+	},
+	{ // 0x18: TSS (base is filled in at runtime)
+		// NOTE(review): architecturally the limit should be
+		// sizeof(tss) - 1; one byte over is harmless but confirm.
+		limit_low: sizeof(tss),
+		base_low: 0,
+		base_mid: 0,
+		type: 9, // available 32-bit TSS
+		user: 0,
+		dpl: 0,
+		present: 1,
+		limit_high: 0,
+		sw: 0,
+		reserved: 0,
+		opsize: 0,
+		gran: 0, // byte granularity
+		base_high: 0
+	}
+};
+
+// Memory operand for LGDT/LIDT: 6 bytes of padding place "limit" at
+// offset 6, so entry.S can load the limit:base pair with an
+// instruction addressing (&ptr + 6).
+struct X86DescriptorTablePointer {
+	u8 pad[6];
+	u16 limit;
+	u32 address;
+};
+
+X86DescriptorTablePointer x86_gdtr = {
+	// G++ still won't handle complex labelled initializers, so
+	// we have to explicitly initialize pad.
+
+	pad: { 0, 0, 0, 0, 0, 0 },
+	// The architectural limit field holds the table size in bytes
+	// minus one (the offset of its last byte), not the size itself.
+	limit: sizeof(x86_gdt) - 1,
+	address: reinterpret_cast<u32>(&x86_gdt)
+};
+
+// Physical-address variant used by entry.S before paging is enabled.
+X86DescriptorTablePointer x86_gdtr_phys = {
+	pad: { 0, 0, 0, 0, 0, 0 },
+	limit: sizeof(x86_gdt) - 1,
+	address: reinterpret_cast<u32>(&x86_gdt) - KERNEL_START
+};
+
+// One IDT gate.  Byte 5 decomposes as: type (gate kind), user (the
+// descriptor "S" bit, 0 for system gates), dpl, present.
+struct X86InterruptDescriptor {
+	u16 offset_low;
+	u16 selector;
+	u8 stack_index:3;
+	u8 reserved:5;
+	u8 type:4;
+	u8 user:1;
+	u8 dpl:2;
+	u8 present:1;
+	u16 offset_high;
+};
+
+// The interrupt descriptor table, populated by set_int_gate().
+static X86InterruptDescriptor idt[256];
+
+static X86DescriptorTablePointer idtr = {
+	pad: { 0, 0, 0, 0, 0, 0 },
+	// Architectural limit = table size in bytes minus one.
+	limit: sizeof(idt) - 1,
+	address: reinterpret_cast<u32>(&idt)
+};
+
+// Set a gate for INT num to start executing at addr.
+//
+// If ints_off is set, then interrupts will remain disabled until
+// software enables them; otherwise, the interrupt flag will have the
+// same state as in the code that was interrupted.
+//
+// If user is true, then this gate can be called directly from
+// userspace with the INT instruction.  Otherwise, usermode
+// INT to this gate will cause a GPF, but it may still be reached
+// from user code via hardware interrupt or exception.
+//
+// If stack_index is non-zero, use the specified alternate stack even if
+// interrupting kernel code.
+
+static void set_int_gate(int num, void *addrptr, bool ints_off = false,
+                         bool user = false, int stack_index = 0)
+{
+	u32 addr = (u32)addrptr;
+
+	X86InterruptDescriptor desc = {
+		offset_low: addr & 0xffff,
+		selector: 0x10, // kernel code segment
+		stack_index: stack_index,
+		reserved: 0,
+		// 14 = interrupt gate (clears IF), 15 = trap gate.
+		type: ints_off ? 14 : 15,
+		user: 0, // "S" bit: 0 for system descriptors/gates
+		dpl: user ? 3 : 0, // the user param controls DPL, not "S"
+		present: 1,
+		offset_high: (addr >> 16) & 0xffff,
+	};
+
+	idt[num] = desc;
+}
+
+// Entry points defined in arch/x86/entry.S; declared as ints only so
+// their addresses can be taken -- they are never called from C++.
+extern int x86_diverr, x86_debug, x86_breakpoint;
+extern int x86_gpf, x86_page_fault, x86_invalid_insn, x86_int99_entry;
+extern void *x86_irqs[256];
+
+namespace Arch {
+namespace Priv {
+	// Load the IDT register and install the exception and IRQ gates.
+	void set_idt()
+	{
+		// GCC 4.0 pukes on "m" (&idtr.limit), saying it's not
+		// directly addressable.
+
+		asm volatile("lidtl 6(%0)" : : "r" (&idtr) : "memory");
+
+		set_int_gate(0, &x86_diverr);        // #DE divide error
+		set_int_gate(1, &x86_debug);         // #DB debug
+		set_int_gate(3, &x86_breakpoint);    // #BP breakpoint
+		set_int_gate(6, &x86_invalid_insn);  // #UD invalid opcode
+		set_int_gate(13, &x86_gpf);          // #GP general protection
+		set_int_gate(14, &x86_page_fault, true); // #PF, IF stays off
+		set_int_gate(0x99, &x86_int99_entry); // int $0x99 entry
+
+		// Hardware IRQs occupy vectors 0x20-0x2f; keep interrupts
+		// disabled while their handlers run.
+		for (int i = 0x20; i < 0x30; i++)
+			set_int_gate(i, x86_irqs[i], true);
+	}
+}
+}
--- /dev/null
+// arch/x86/entry.S - x86 entry points (booting and traps)
+//
+// This software is copyright (c) 2006 Scott Wood.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <arch/addrs.h>
+
+	.org 0
+	.code32
+	.global _start
+
+	// Multiboot header; must be longword-aligned and near the start
+	// of the image so the loader can find it.
+.align 4
+multiboot_hdr:
+	.long 0x1badb002 // Multi-boot magic
+
+	// Multi-boot flags:
+	// bit 0: 4KiB-align all boot modules
+	// bit 1: must include memory size and map
+	// bit 2: must include video mode table
+	// bit 16: load addresses in this header are valid
+	// and should be used instead of the ELF header
+
+	.long 0x00010003
+
+	// checksum: -(magic + flags), update if flags change
+	// (0xe4514ffb == -(0x1badb002 + 0x00010003) mod 2^32)
+	.long 0xe4514ffb
+
+	.long multiboot_hdr - KERNEL_START // header_addr
+	.long 0x00200000 // load_addr (physical)
+	.long 0 // load_end_addr: load whole file
+	.long bss_end - KERNEL_START // bss_end_addr (linker script symbol)
+	.long _start - KERNEL_START // entry_addr
+
+_start:
+ cld
+ lgdt x86_gdtr_phys + 6 - KERNEL_START
+ ljmp $0x10, $using_our_gdt - KERNEL_START
+
+using_our_gdt:
+ movw $0x08, %ax
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %fs
+ movw %ax, %gs
+ movw %ax, %ss
+
+ movl %ebx, %esi // Save the multiboot pointer somewhere
+ // it won't be clobbered by CPUID
+
+ // This gives 512 bytes to Threads::Thread; if it gets larger
+ // this needs to be updated (as well as the code at high_vaddr).
+
+ movl $x86_init_stack + 3584 - KERNEL_START, %esp
+
+ // Test for CPUID
+ pushfl
+ popl %eax
+ movl %eax, %ebx
+ xorl $0x00200000, %eax
+ pushl %eax
+ popfl
+ pushfl
+ popl %eax
+ cmpl %eax, %ebx
+ je no_pentium
+
+ // Test for Page Size Extensions
+ xorl %eax, %eax
+ cpuid
+ cmpl $1, %eax
+ jb no_pentium
+
+ movl $1, %eax
+ cpuid
+ btl $3, %edx
+ jnc no_pentium
+
+ // enable PSE
+ movl %cr4, %eax
+ btsl $4, %eax
+ movl %eax, %cr4
+
+ // Load the initial page table
+ movl $x86_init_ptbl_l2 - KERNEL_START, %eax
+ movl %eax, %cr3
+
+ // enable paging, kernel write-protect,
+ // and internal floating point error handling
+ movl %cr0, %eax
+ orl $0x80010020, %eax
+ movl %eax, %cr0
+
+ // Set up high page tables for 0x80000000 mapping,
+
+ movl $0x87, x86_init_ptbl_l2 + 0x800 - KERNEL_START
+ ljmp $0x10, $paging_on - KERNEL_START
+
+paging_on:
+ lgdt x86_gdtr + 6
+ movl %esi, x86_boot_info_phys
+
+ movl $high_vaddr, %eax
+ jmp *%eax
+
+high_vaddr:
+ movl $x86_init_stack + 3584, %esp
+ jmp start_kernel
+
+no_multiboot:
+ movl $no_multiboot_str - KERNEL_START, %esi
+ jmp do_print
+no_pentium:
+ movl $no_pentium_str - KERNEL_START, %esi
+do_print:
+ movl $0xb8000, %edi
+
+ movb (%esi), %al
+ xorl %ecx, %ecx
+1: movb %al, (%edi, %ecx, 2)
+ movb $14, 1(%edi, %ecx, 2) // It's not at the cursor, so use
+ // yellow to make it stand out.
+ incl %ecx
+ movb (%esi, %ecx), %al
+ testb %al, %al
+ jnz 1b
+
+2: jmp 2b
+
+no_pentium_str:
+ .string "This kernel requires a Pentium-compatible CPU. Either CPUID or PSE is missing."
+
+no_multiboot_str:
+ .string "Unrecognized bootloader; a multiboot-compliant loader is required."
+
+ .macro pushvolatilesnoeax
+ pushl %ecx
+ pushl %edx
+ .endm
+
+ .macro pushvolatiles
+ pushl %eax
+ pushvolatilesnoeax
+ .endm
+
+ .macro popvolatiles
+ popl %edx
+ popl %ecx
+ popl %eax
+ .endm
+
+ // Non-volatile registers must be pushed if the handler will
+ // need to access all of the interrupted code's registers,
+ // such as when producing an error dump. Does not include
+ // edi, as that is usually swapped with the error code.
+
+ .macro pushall
+ pushl %esi
+ pushl %ebp
+ pushl %esp
+ pushl %ebx
+ pushl %edx
+ pushl %ecx
+ pushl %eax
+ .endm
+
+ .macro pushallnoerr
+ pushl %edi
+ pushall
+ .endm
+
+ .macro popall
+ popl %eax
+ popl %ecx
+ popl %edx
+ popl %ebx
+ addl $4, %esp
+ popl %ebp
+ popl %esi
+ popl %edi
+ .endm
+
+ // CPU exception entries without an error code: save the full
+ // register frame and pass its address (current %esp) to the
+ // corresponding C++ x86_do_* handler.
+ .global x86_diverr
+x86_diverr:
+ pushallnoerr
+
+ pushl %esp
+ call x86_do_diverr
+ addl $4, %esp
+
+ popall
+ iret
+
+ .global x86_debug
+x86_debug:
+ pushallnoerr
+
+ pushl %esp
+ call x86_do_debug
+ addl $4, %esp
+
+ popall
+ iret
+
+ .global x86_breakpoint
+x86_breakpoint:
+ pushallnoerr
+
+ pushl %esp
+ call x86_do_breakpoint
+ addl $4, %esp
+
+ popall
+ iret
+
+ .global x86_invalid_insn
+x86_invalid_insn:
+ pushallnoerr
+
+ pushl %esp
+ call x86_do_invalid_insn
+ addl $4, %esp
+
+ popall
+ iret
+
+ // Exceptions that push an error code: swap %edi with the error
+ // code on the stack (the saved %edi then occupies the slot where
+ // edi belongs in the pushall frame), and pass frame + error code.
+ .global x86_gpf
+x86_gpf:
+ xchgl %edi, (%esp) // get error code
+ pushall
+
+ movl %esp, %eax
+ pushl %edi // error code
+ pushl %eax // register frame
+ call x86_do_gpf
+ addl $8, %esp
+
+ popall
+ iret
+
+ .global x86_page_fault
+x86_page_fault:
+ xchgl %edi, (%esp) // get error code
+ pushall
+
+ movl %esp, %ecx // register frame
+ movl %cr2, %eax // faulting address
+ pushl %edi // error code
+ pushl %eax
+ pushl %ecx
+ call x86_do_page_fault
+ addl $12, %esp
+
+ popall
+ iret
+
+ // System call entry (int $0x99); arguments arrive in
+ // %eax/%ecx/%edx, and the user return %eip is passed as well.
+ .global x86_int99_entry
+x86_int99_entry:
+ pushl %edx
+ pushl 4(%esp) // user return %eip from the iret frame
+ pushl %edx
+ pushl %ecx
+ pushl %eax
+ call invoke_method
+ addl $16, %esp
+ xorl %ecx, %ecx // zero %ecx before returning to user
+ popl %edx
+ iret
+
+ // Common IRQ entry: the per-vector stubs (irq macro, below) have
+ // already pushed %eax and loaded the vector number into %eax.
+ .global x86_irq
+x86_irq:
+ pushvolatilesnoeax
+
+ pushl %eax // vector number
+ call x86_do_irq
+ addl $4, %esp
+
+ movl need_resched, %eax
+ testl %eax, %eax
+ jnz x86_reschedule
+
+x86_ret_irq:
+ popvolatiles
+ iret
+
+x86_reschedule:
+ // The cli is to make sure interrupts don't get re-enabled in
+ // this thread context between the schedule and the ret from
+ // IRQ.
+
+ cli
+ call schedule
+ jmp x86_ret_irq
+
+ // x86_irqs: table of per-vector stub entry points, collected in
+ // the .irqs section as the irq macro below emits each stub.
+ .section ".irqs","x"
+ .global x86_irqs
+x86_irqs:
+ .text
+
+ // Emit one stub per vector in [\from, \to] (by recursion): each
+ // stub saves %eax, loads its vector number into %eax, and jumps
+ // to the common x86_irq entry; its address is appended to .irqs.
+ .macro irq from,to
+1: pushl %eax
+ movl $\from, %eax
+ jmp x86_irq
+ .section ".irqs","x"
+ .long 1b
+ .text
+ .if \to-\from
+ irq (\from+1),\to
+ .endif
+ .endm
+
+ // Expand the irq macro 16 vectors at a time.
+ .macro irq16 from,to
+ irq \from,(\from+15)
+ .if \to-\from
+ irq16 (\from+16),\to
+ .endif
+ .endm
+
+ irq16 0,240 // stubs for all 256 vectors (0..255)
+
+ // First code run in a brand-new thread (entered from the
+ // switch_thread asm when jump_to_init is set): clear the
+ // registers inherited from the old thread, then pop and call the
+ // entry point placed on the stack by ArchThread::init.
+ .global x86_new_thread
+x86_new_thread:
+ xorl %ebx, %ebx
+ xorl %ecx, %ecx
+ xorl %edx, %edx
+ xorl %ebp, %ebp
+ xorl %esi, %esi
+ xorl %edi, %edi
+
+ call sched_new_thread
+ pop %eax // entry point; its arg is next on the stack
+ call *%eax
+ call exit_thread
+ ud2a // exit_thread must not return
+
+ .section ".roshared","x"
+
+ // The syscall pointer must be the first thing in roshared
+ // (at vaddr 0x7fff0000), so that user code can make method
+ // invocations to find out where other stuff is.
+
+ .global x86_syscall_ptr
+x86_syscall_ptr:
+ .long x86_shared_int99 - x86_syscall_ptr + 0x7fff0000
+
+ // Syscall trampoline invoked by user code via the pointer above.
+ .global x86_shared_int99
+x86_shared_int99:
+ int $0x99
+ // FIXME: search for exception handler
+ ret
--- /dev/null
+/* Kernel linker script: everything is linked at 0x80200000 (the
+ high kernel mapping) in a single PT_LOAD segment.  The symbols
+ defined here (eh_frame_begin, ctors*, *shared*, bss_end, _end)
+ are referenced from the boot code and C++ runtime setup. */
+ENTRY(_start)
+PHDRS
+{
+ /* One loadable segment covering the ELF file/program headers
+ and all sections. */
+ all PT_LOAD FILEHDR PHDRS AT (0x80200000);
+}
+SECTIONS
+{
+ . = 0x80200000 + SIZEOF_HEADERS;
+ .text : {
+ *(.text) *(.gnu.linkonce.t.*)
+ *(.rodata*) *(.gnu.linkonce.r.*)
+ . = (. + 3) & ~ 3; /* align to 4 bytes */
+ eh_frame_begin = .;
+ *(.eh_frame)
+ LONG(0) /* zero terminator for .eh_frame */
+ *(.rela.eh_frame) *(.gcc_except_table)
+ . = (. + 3) & ~ 3;
+ ctors = .;
+ *(.ctors)
+ LONG(0) /* NULL terminator (presumably for run_ctors) */
+ ctors_end = .;
+ *(.irqs) /* IRQ stub table from entry.S */
+ } :all
+ . = (. + 0xfff) & ~ 0xfff; /* round up to a page boundary */
+ roshared_start = .;
+ .roshared : { *(.roshared) } :all
+ roshared_end = .;
+ . = . | 0xfff; /* last byte of roshared's final page */
+ roshared_page_end = .;
+ . = . + 1; /* first byte of the next page */
+ rwshared_start = .;
+ .shared_data : { *(.rwshared) } :all
+ rwshared_end = .;
+ . = . | 0xfff;
+ rwshared_page_end = .;
+ . = . + 1;
+ data_start = .;
+ .data : { *(.data) *(.gnu.linkonce.d.*) } :all
+ bss_start = .;
+ .bss : { *(.bss) } :all
+ bss_end = .; /* referenced by the multiboot header */
+ _end = .;
+}
--- /dev/null
+// arch/x86/mem.cc -- x86 paging and misc. memory management
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/kernel.h>
+#include <kern/mem.h>
+#include <kern/pagealloc.h>
+#include <kern/libc.h>
+
+// Initial page tables have the first 4 MiB mapped, using large pages.
+
+// Entry 0 (0x87 = 4MiB page, present, writeable, user) identity-maps
+// physical 0-4MiB; the boot code adds the high kernel mapping before
+// enabling paging.
+__attribute__((aligned(4096))) u32 x86_init_ptbl_l2[1024] = {
+ 0x87
+};
+
+extern int _end; // end of the kernel image (from the linker script)
+
+using Mem::PageAllocZone;
+
+namespace Arch {
+ u64 mem_end; // end of physical RAM; set from the BIOS map in multiboot.cc
+
+ namespace Priv {
+ // Start index of zone x's pointer list inside pagezonelists_real
+ // (triangular packing: list x holds x+1 zone pointers).
+ // NOTE(review): zonelist(0) expands to -1, so the zone-0 list
+ // pointer refers to one element before pagezonelists_real --
+ // verify this is intended.
+ #define zonelist(x) ((x) * ((x) + 1) / 2 - 1)
+
+ PageAllocZone pagezones[num_zones];
+ PageAllocZone *pagezonelists_real[zonelist(num_zones + 1)];
+ }
+
+ // Per-zone allocator search lists; filled in by map_physmem().
+ PageAllocZone **pagezonelists[Priv::num_zones];
+
+ // First address available for early boot-time allocations; starts
+ // immediately after the kernel image.
+ uintptr_t next_free_bootmem = reinterpret_cast<uintptr_t>(&_end);
+}
+
+namespace Arch {
+namespace Priv {
+ // Drop the identity mapping of low memory that was only needed
+ // while paging was being turned on, and mark the kernel-text
+ // mapping global.  Called once from arch_init().
+ void early_adjust_mappings()
+ {
+ using Mem::get_bootmem;
+
+ // Clear low-address mapping and invalidate TLB
+ x86_init_ptbl_l2[0] = 0;
+ asm volatile("movl %0, %%cr3" : : "r" (kvirt_to_phys(x86_init_ptbl_l2)));
+
+ // Mark the ktext mapping global now that it's not mapped at address
+ // zero. FIXME: check for and enable PGE
+
+ x86_init_ptbl_l2[0x200] |= 0x100;
+ }
+
+ // Map all of physical memory (up to mem_end) at PHYSMEM_START
+ // with 4 MiB pages, allocate the Mem::Page array covering it, and
+ // build the per-zone page-allocator search lists.  Called from
+ // MultiBoot::process_info() once mem_end is known.
+ void map_physmem()
+ {
+ using Mem::get_bootmem;
+
+ // phys_to_ktext can be used for the first
+ // 4MiB-minus-size-of-kernel of bootmem allocations.
+
+ for (uintptr_t physpage = 1; physpage <= (mem_end - 1) / (4096*1024);
+ physpage++)
+ {
+ uintptr_t virtpage = physpage + (PHYSMEM_START >> 22);
+ x86_init_ptbl_l2[virtpage & 1023] = (physpage << 22) | 0x187;
+ }
+
+ size_t num_pages = mem_end / page_size;
+ size_t pages_size = num_pages * sizeof(Mem::Page);
+ Mem::pages = static_cast<Mem::Page *>(get_bootmem(pages_size, 4));
+
+ // BUG FIX: last_page is a Mem::Page pointer, so the offset must
+ // be a page count; the old code added pages_size (a byte count),
+ // which pointer arithmetic scaled by sizeof(Mem::Page), leaving
+ // last_page far beyond the end of the array.
+ Mem::last_page = Mem::pages + num_pages - 1;
+ bzero(Mem::pages, pages_size);
+
+ // listpos[j] is the next free slot in zone j's search list.
+ int listpos[num_zones];
+
+ // Walk zones from highest to lowest; zone i's pages are usable
+ // by every list j >= i.
+ for (int i = num_zones - 1; i >= 0; i--) {
+ listpos[i] = zonelist(i);
+ pagezonelists[num_zones - 1 - i] = &pagezonelists_real[listpos[i]];
+
+ u64 rstart = mem_zone_regions[i].start;
+ u64 rend = mem_zone_regions[i].end;
+
+ // Only set up zones that overlap [mem_start, mem_end].
+ if (mem_start <= rend && mem_end >= rstart) {
+ if (rstart < mem_start)
+ rstart = mem_start;
+ if (rend > mem_end)
+ rend = mem_end;
+
+ ulong page_start = rstart / page_size;
+ ulong page_len = (rend - rstart + 1) / page_size;
+
+ pagezones[i].init(page_start, page_len);
+
+ for (int j = i; j < num_zones; j++) {
+ assert(listpos[j] < zonelist(j + 1));
+ pagezonelists_real[listpos[j]++] = &pagezones[i];
+ }
+ }
+ }
+ }
+}
+}
--- /dev/null
+// arch/x86/misc.cc -- Misc. arch-specific stuff
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/types.h>
+#include <kern/libc.h>
+#include <kern/arch.h>
+#include <kern/i8259.h>
+#include <kern/time.h>
+#include <kern/thread.h>
+#include <kern/mem.h>
+
+#include <arch/addrs.h>
+#include <arch/multiboot.h>
+
+extern u32 x86_init_ptbl_l2[1024];
+
+// Stack page for the initial kernel thread: the Thread structure
+// occupies the top of the page, with the stack growing down below it.
+// The boot code (entry.S) hard-codes the initial %esp at offset 3584,
+// which assumes thread_size <= 512 bytes.
+struct X86InitStack {
+ u8 stack[4096 - ::Threads::thread_size];
+ ::Threads::Thread thread;
+} __attribute__((aligned(4096))) x86_init_stack;
+
+namespace Arch {
+ namespace Priv {
+ void set_idt();
+
+ // Dump the saved register frame built by the entry.S stubs.
+ // Frame layout: [0]=eax [1]=ecx [2]=edx [3]=ebx [4]=saved esp
+ // [5]=ebp [6]=esi [7]=edi [8]=eip [9]=cs [10]=eflags
+ // [11]=user esp (only present if the fault came from user mode).
+ void show_regs(u32 *stack) {
+ // If the saved CS's RPL is non-zero the trap came from user
+ // mode and the CPU pushed the user %esp; otherwise compute the
+ // kernel %esp at the time of the trap (just past the frame).
+ printf("eax: 0x%08x ecx: 0x%08x edx: 0x%08x ebx: 0x%08x\n"
+ "esp: 0x%08x ebp: 0x%08x esi: 0x%08x edi: 0x%08x\n"
+ "eflags: 0x%08x\n",
+ stack[0],
+ stack[1],
+ stack[2],
+ stack[3],
+ stack[9] & 3 ? stack[11] : (u32)stack + 11 * 4,
+ stack[5],
+ stack[6],
+ stack[7],
+ stack[10]);
+
+ // Walk the saved-%ebp chain, printing each return address;
+ // stop once the frame pointer leaves kernel space.
+ printf("Stack trace: ");
+ u32 *frame = (u32 *)stack[5];
+
+ for (int i = 2; i < 32; i++) {
+ u32 stackptr = frame[1];
+ frame = (u32 *)frame[0];
+
+ if ((u32)frame < 0x80000000UL)
+ break;
+
+ if (!(i % 7))
+ printf("\n");
+
+ printf("0x%08x ", stackptr);
+ }
+ }
+
+ // IRQ 0 handler: drives the monotonic timer queue.
+ struct TimerInt : public IRQ::Interrupt {
+ bool action()
+ {
+ Time::monotonic_timers->run();
+ return true;
+ }
+ };
+
+ TimerInt timer_int;
+ }
+
+ using IRQ::i8259;
+ ::Threads::Thread *init_thread;
+
+ // Early architecture setup, called from start_kernel() before
+ // interrupts are enabled.  Order matters: mappings and IDT before
+ // the multiboot info is read, PIC before the TSS/addr-space setup.
+ void arch_init()
+ {
+ init_thread = &x86_init_stack.thread;
+ Priv::early_adjust_mappings();
+ Priv::set_idt();
+ Priv::MultiBoot::process_info();
+ i8259.init();
+
+ // Patch the TSS base into GDT entry 3 (selector 0x18).
+ u32 tss_addr = reinterpret_cast<u32>(&Priv::tss);
+ x86_gdt[3].base_low = tss_addr & 0xffff;
+ x86_gdt[3].base_mid = (tss_addr & 0xff0000) >> 16;
+ x86_gdt[3].base_high = (tss_addr & 0xff000000) >> 24;
+
+ Priv::tss.ss0 = 8; // kernel stack segment for ring transitions
+ asm volatile("ltr %w0" : : "r" (0x18) : "memory");
+ init_thread->addr_space = new Mem::AddrSpace(x86_init_ptbl_l2);
+ init_thread->active_addr_space = init_thread->addr_space;
+ }
+
+ // Hook the timer interrupt (i8259 slot 0).
+ void timer_init()
+ {
+ IRQ::InterruptSlot *timer = i8259.get_slot(0);
+ i8259.request_int(timer, &Priv::timer_int);
+ }
+
+ // Prepare a new thread so that the first switch to it enters
+ // x86_new_thread (entry.S), which pops and calls entry(arg).
+ void ArchThread::init(void *entry, void *arg)
+ {
+ // The ArchThread sits at the top of the thread's stack; build
+ // the initial stack contents immediately below it.
+ void **stack = reinterpret_cast<void **>(this);
+
+ *--stack = arg;
+ *--stack = entry;
+
+ esp = stack;
+ ebp = 0;
+ jump_to_init = 1; // tested by the switch_thread asm
+ }
+}
+
+using Arch::Priv::show_regs;
+
+// C++ halves of the exception stubs in entry.S.  Each receives the
+// saved register frame; stack[8]/stack[9] are the faulting EIP/CS.
+// All of them currently dump state and hang.
+
+extern "C" void x86_do_diverr(u32 *stack)
+{
+ printf("Division error at 0x%04x:0x%08x\n", stack[9], stack[8]);
+ show_regs(stack);
+ for(;;);
+}
+
+extern "C" void x86_do_debug(u32 *stack)
+{
+ printf("Debug exception at 0x%04x:0x%08x\n", stack[9], stack[8]);
+ show_regs(stack);
+ for(;;);
+}
+
+extern "C" void x86_do_breakpoint(u32 *stack)
+{
+ printf("Breakpoint at 0x%04x:0x%08x\n", stack[9], stack[8]);
+ show_regs(stack);
+ for(;;);
+}
+
+extern "C" void x86_do_invalid_insn(u32 *stack)
+{
+ printf("Invalid instruction at 0x%04x:0x%08x\n", stack[9], stack[8]);
+ show_regs(stack);
+ for(;;);
+}
+
+// Page fault handler.  error_code bits (x86): 1=protection (vs. not
+// present), 2=write, 4=user mode, 8=reserved PTE bit set.
+// in_fault (defined elsewhere) guards against recursive faults while
+// reporting.
+extern "C" void x86_do_page_fault(u32 *stack, u32 fault_addr, u32 error_code)
+{
+ Mem::AddrSpace *as;
+
+ if (in_fault)
+ for(;;);
+
+ // A reserved bit was set in the PTE; this is always a bug.
+ if (error_code & 8)
+ goto bad_fault;
+
+ // Don't try to fix up a page fault if interrupts were disabled. It is an
+ // error to access non-locked pages with interrupts disabled. Trying to
+ // fix it up in the case of an access that would be legitimate if interrupts
+ // were enabled would simply mask the loss of atomicity, and trying to grab
+ // locks to look up the address if it is a completely bad reference won't
+ // accomplish much other than decreasing the odds that the fault message
+ // gets out.
+
+ // stack[10] is the saved EFLAGS; 0x200 is the IF bit.
+ if (!(stack[10] & 0x200))
+ goto bad_fault;
+
+ // Don't allow fault-ins using a borrowed addr-space.
+ as = curthread->addr_space;
+
+ if (!as || curthread == Arch::init_thread)
+ goto bad_fault;
+
+ ll_ints_on();
+
+ // FIXME: no-exec
+ if (as->handle_fault(fault_addr, error_code & 2,
+ false /* error_code & 16 */, error_code & 4))
+ return;
+
+ // FIXME: throw exception to user
+
+bad_fault:
+ ll_ints_off();
+ in_fault++;
+
+ printf("Page fault at 0x%04x:0x%08x for 0x%08x, error code: 0x%04x\n",
+ stack[9], stack[8], fault_addr, error_code);
+
+ show_regs(stack);
+
+ for(;;);
+}
+
+extern "C" void x86_do_gpf(u32 *stack, u32 error_code)
+{
+ if (in_fault)
+ for(;;);
+
+ in_fault++;
+
+ printf("General protection fault at 0x%04x:0x%08x, error code: 0x%04x\n",
+ stack[9], stack[8], error_code);
+
+ show_regs(stack);
+
+ for(;;);
+}
+
+// Called from the common IRQ entry (entry.S) with the vector number;
+// vectors 0x20+ are routed to the PIC driver as IRQ 0..15.
+extern "C" void x86_do_irq(int irq)
+{
+ IRQ::i8259.handle_irq(irq - 0x20);
+}
--- /dev/null
+// arch/x86/multiboot.cc -- Code to interpret multiboot data and initialize
+// free memory regions based thereupon.
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/libc.h>
+#include <kern/pagealloc.h>
+
+#include <arch/multiboot.h>
+#include <arch/addrs.h>
+#include <arch/paging.h>
+#include <arch/mem.h>
+
+#include <util/misc.h>
+#include <limits.h>
+
+// Physical address of the multiboot info block, stored by the boot
+// code (entry.S) before paging was enabled.
+u32 x86_boot_info_phys;
+extern int _start;
+
+namespace Arch {
+namespace Priv {
+namespace MultiBoot {
+ BootInfo *boot_info;
+
+ // Stub; the memory map is currently parsed inline by process_info.
+ void parse_mmap()
+ {
+ }
+
+ using Util::round_up;
+ using Util::round_down;
+
+ // Give the physical range [start, end] (inclusive) to the page
+ // allocator: for each memory zone it overlaps, clip the range to
+ // the zone and free the contained pages.
+ void make_mem_avail(u64 start, u64 end)
+ {
+ using Mem::pages;
+
+ if (start > end)
+ return;
+
+ for (int i = 0; i < num_zones; i++) {
+ u64 rstart = mem_zone_regions[i].start;
+ u64 rend = mem_zone_regions[i].end;
+
+ if (start <= rend && end >= rstart) {
+ if (rstart < start)
+ rstart = start;
+ if (rend > end)
+ rend = end;
+
+ ulong page_len = (rend - rstart + 1) / page_size;
+ pagezones[i].free(Mem::phys_to_page(rstart), page_len);
+ }
+ }
+ }
+
+ // Interpret the multiboot info: a first pass over the BIOS memory
+ // map finds mem_end (printing the map as it goes); map_physmem()
+ // then maps physical memory and sets up the allocator; a second
+ // pass frees every available page that doesn't overlap the first
+ // page, the kernel image, or early bootmem allocations.
+ void process_info()
+ {
+ // The info block is accessed through the kernel-text mapping,
+ // so it must lie below max_ktext_map.
+ if (x86_boot_info_phys > max_ktext_map) {
+ printf("Cannot access boot info at %#.8x\n", x86_boot_info_phys);
+ for(;;);
+ }
+
+ boot_info = (BootInfo *)phys_to_kvirt(x86_boot_info_phys);
+ mem_end = 0;
+
+ if (!(boot_info->flags & (1 << BootInfo::flag_mmap))) {
+ // FIXME: use mem_lower and mem_upper in this case.
+ printf("MultiBoot info does not contain a memory map.\n");
+ for(;;);
+ }
+
+ printf("BIOS Memory Map:\n");
+
+ uint off = 0;
+ while (off < boot_info->mmap_length) {
+ u32 phys = boot_info->mmap_addr + off;
+
+ if (phys > max_ktext_map) {
+ printf("Cannot access BIOS memory map entry at %#.8x\n", phys);
+ for(;;);
+ }
+
+ MemMap *mmap = (MemMap *)phys_to_kvirt(phys);
+
+ printf("0x%016llx - 0x%016llx, type %d\n",
+ mmap->base, mmap->base + mmap->len - 1, mmap->type);
+
+ // The size field does not include its own 4 bytes.
+ off += mmap->size + 4;
+
+ if (mmap->type == MemMap::Available) {
+ u64 end = mmap->base + mmap->len;
+
+ if (end > mem_end)
+ mem_end = end;
+ }
+ }
+
+ map_physmem();
+
+ // Don't make available any pages that overlap the
+ // kernel text/data or bootmem allocations.
+
+ u64 kernelstart = round_down(kvirt_to_phys(&_start), page_shift);
+ u64 kernelend = round_up(kvirt_to_phys((void *)next_free_bootmem),
+ page_shift) - 1;
+
+ off = 0;
+ while (off < boot_info->mmap_length) {
+ u32 phys = boot_info->mmap_addr + off;
+ MemMap *mmap = (MemMap *)phys_to_kvirt(phys);
+ off += mmap->size + 4;
+
+ if (mmap->type == MemMap::Available) {
+ // Don't use any page that isn't fully in this entry.
+
+ u64 start = round_up(mmap->base, page_shift);
+ u64 end = round_down(mmap->base + mmap->len, page_shift) - 1;
+
+ // Don't overwrite page 1; the BIOS will need it for later calls in
+ // emulation, and at least the BIOS I'm testing on doesn't mark
+ // it as reserved.
+
+ if (start < page_size)
+ start = page_size;
+
+ // Carve the kernel/bootmem range out of the entry.
+ if (start <= kernelend && end >= kernelstart) {
+ if (start < kernelstart)
+ make_mem_avail(start, kernelstart - 1);
+
+ if (end > kernelend)
+ make_mem_avail(kernelend + 1, end);
+ } else {
+ make_mem_avail(start, end);
+ }
+ }
+ }
+ }
+}
+}
+}
--- /dev/null
+// arch/x86/thread.cc -- Thread switching
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/thread.h>
+#include <kern/mem.h>
+#include <kern/pagetable.h>
+
+namespace Arch {
+ // Point the MMU at 'aspace' by loading %cr3 with the physical
+ // address of its top-level page table.
+ void set_aspace(Mem::AddrSpace *aspace)
+ {
+ u32 cr3 = Mem::kvirt_to_phys(aspace->page_table->toplevel);
+ asm volatile("movl %0, %%cr3" : : "r" (cr3) : "memory");
+ }
+
+ // Context switch from 'src' to 'dest': save esp/ebp into
+ // src->arch, load dest's, and either resume dest where it stopped
+ // or, for a never-run thread (jump_to_init set by
+ // ArchThread::init), enter x86_new_thread in entry.S.
+ void switch_thread(Threads::Thread *dest, Threads::Thread *src)
+ {
+ u32 dummy1, dummy2;
+
+ if (dest->addr_space) {
+ assert(dest->addr_space == dest->active_addr_space);
+
+ // Skip the CR3 reload (and TLB flush) if already current.
+ if (dest->addr_space != src->active_addr_space)
+ set_aspace(dest->addr_space);
+ } else {
+ // Threads with no address space of their own borrow the
+ // one that is already active.
+ dest->active_addr_space = src->active_addr_space;
+ }
+
+ // Kernel stack pointer for ring 3 -> ring 0 transitions; the
+ // Thread structure sits at the top of its kernel stack.
+ Priv::tss.esp0 = reinterpret_cast<u32>(dest);
+
+ // ArchThread layout: +0 esp, +4 ebp, +8 jump_to_init.
+ // The cmpb result survives the following movs (they don't
+ // touch EFLAGS) and is consumed by jnz after the stack switch.
+ asm volatile("movl %%esp, (%0);"
+ "movl %%ebp, 4(%0);"
+ "movb $0, 8(%0);"
+ "movb 8(%1), %%al;"
+ "cmpb $0, %%al;"
+ "movl (%1), %%esp;"
+ "movl 4(%1), %%ebp;"
+ "jnz x86_new_thread;" :
+ "=a" (dummy1), "=c" (dummy2) :
+ "0" (&src->arch.esp), "1" (&dest->arch.esp) :
+ "ebx", "edx", "esi", "edi", "memory");
+ }
+}
--- /dev/null
+# Build fragment for the core/ subdirectory: registers the directory
+# and its C++ sources with the top-level build lists.
+DIR := core/
+DIRS += $(DIR)
+
+# Sources (without directory or extension); prefix each with $(DIR).
+RAW_CXXFILES := init irq time misc thread lock
+CXXFILES += $(addprefix $(DIR),$(RAW_CXXFILES))
--- /dev/null
+// core/event.cc -- EventDispatcher
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+
+#include <kern/event.h>
+
+namespace Event {
+ // Event delivery is not implemented yet; these are placeholder
+ // definitions so the rest of the kernel links.
+
+ void EventTrigger::notify(VStruct *info)
+ {
+ }
+
+ void EventDispatcher::setup_trigger(Event *event, Notifier *trigger)
+ {
+ }
+
+ void EventDispatcher::setup_sync_trigger(Event *event, SyncNotifier *trigger)
+ {
+ }
+
+ // Trigger intended to invoke an in-kernel function; stub for now.
+ class KFuncTrigger : public EventTrigger
+ {
+
+ }; // BUG FIX: a class definition requires a trailing semicolon;
+ // without it this file does not compile.
+
+ void EventDispatcher::setup_kfunc_event(KFunc func, SyncNotifier *trigger)
+ {
+
+ }
+}
--- /dev/null
+// IDL bindings mapping the kernel Event classes to their system
+// interface definitions.
+class Event.EventDispatcher : System.Events.EventDispatcher;
+class Event.EventTrigger : System.Notifiers.Notifier;
+class Event.SyncEventTrigger : System.Notifiers.SyncNotifier;
--- /dev/null
+// core/init.cc -- C++ entry point and component initialization.
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/types.h>
+#include <kern/libc.h>
+#include <kern/console.h>
+#include <kern/arch.h>
+#include <kern/time.h>
+#include <kern/thread.h>
+
+extern void *eh_frame_begin; // defined by the linker script
+extern "C" void __register_frame(const void *begin); // libgcc unwinder
+
+void run_test();
+
+// C++ entry point, jumped to from the boot code (entry.S) on the
+// init thread's stack.  The ordering below matters: constructors,
+// then arch setup, then unwind tables, then timers/scheduler before
+// interrupts are finally enabled.
+extern "C" void start_kernel()
+{
+ run_ctors();
+ Arch::arch_init();
+
+ // __register_frame must not be called until after dynamic memory
+ // allocation is initialized (exceptions wouldn't work before then,
+ // anyway; if one is thrown, abort() will be called).
+
+ __register_frame(&eh_frame_begin);
+ printf("Starting kernel...\n");
+
+ Time::init();
+ Threads::sched.init();
+ ll_ints_on();
+
+ run_test();
+
+ printf("done.\n");
+ for(;;); // nothing to return to
+}
--- /dev/null
+// core/irq.cc -- generic IRQ handling
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+
+#include <kern/kernel.h>
+#include <kern/irq.h>
+
+namespace IRQ {
+ // Attach 'irq' to 'slot', appending it to the slot's handler
+ // chain.  Throws ResourceBusy if this handler is already on the
+ // chain.  Unmasks the line when this is the first handler and the
+ // slot is not explicitly masked.
+ void InterruptController::request_int(InterruptSlot *slot, Interrupt *irq)
+ {
+ AutoSpinLockIRQ autolock(lock);
+
+ u32 irqnum = get_irqnum(slot);
+ Interrupt *list = slot->first_int;
+ Interrupt **insert = &slot->first_int;
+
+ // Walk to the end of the chain, checking for duplicates.
+ while (list) {
+ if (list == irq)
+ throw_idl(ResourceBusy, -1, countarray("IRQ in use"));
+
+ insert = &list->next;
+ list = *insert;
+ }
+
+ *insert = irq;
+ irq->slot = slot;
+ irq->controller = this;
+
+ if (insert == &slot->first_int && slot->mask_count == 0) {
+ // There were no existing handlers, so it's currently masked
+ unmask(irqnum);
+ }
+ }
+
+ // Detach 'irq' from its slot; masks the line if it was the last
+ // handler, and waits for any in-progress run of the handler chain
+ // to finish before returning.  Throws InvalidReference if the
+ // handler is not on the chain.
+ void InterruptController::free_int(Interrupt *irq)
+ {
+ InterruptSlot *slot = irq->slot;
+ u32 irqnum = get_irqnum(slot);
+
+ DroppableAutoSpinLockIRQ autolock(lock);
+
+ Interrupt *list = slot->first_int;
+ Interrupt **remove = &slot->first_int;
+
+ while (list) {
+ if (list == irq) {
+ *remove = list->next;
+
+ // First in chain with no successor => chain now empty.
+ if (remove == &slot->first_int && !list->next &&
+ slot->mask_count == 0)
+ {
+ // It was the last handler for this IRQ num.
+ mask(irqnum);
+ }
+
+ autolock.unlock();
+ wait_for_irq(slot);
+ return;
+ }
+
+ remove = &list->next;
+ list = *remove;
+ }
+
+ throw_idl(InvalidReference, 0, nullarray);
+ }
+
+ // Dispatch one IRQ (irq >= 0) or drain all pending IRQs
+ // (irq < 0, polling get_pending_irq).  Handlers run with the
+ // controller lock dropped; Running/Pending flags keep a slot's
+ // handlers from running concurrently or being lost meanwhile.
+ // Returns true if any handler reported handling its interrupt.
+ bool InterruptController::handle_irq(int irq)
+ {
+ DroppableAutoSpinLock autolock(lock);
+ bool handled_one = false;
+ bool irq_specified = irq >= 0;
+
+ in_irq = true;
+
+ do {
+ if (!irq_specified) {
+ irq = get_pending_irq();
+
+ if (irq < 0)
+ break;
+ }
+
+ mask_and_ack(irq);
+
+ InterruptSlot *slot = get_slot(irq);
+
+ // Masked or already running: remember it and move on.
+ if (slot->mask_count > 0 || slot->flags & InterruptSlot::Running) {
+ slot->flags |= InterruptSlot::Pending;
+ continue;
+ }
+
+ slot->flags |= InterruptSlot::Running;
+ assert(!(slot->flags & InterruptSlot::Pending));
+
+ autolock.unlock();
+
+ // Run the handler chain unlocked.
+ for (Interrupt *i = slot->first_int; i; i = i->next)
+ handled_one |= i->action();
+
+ autolock.lock();
+
+ slot->flags &= ~InterruptSlot::Running;
+
+ if (slot->mask_count == 0)
+ unmask(irq);
+ } while (!irq_specified);
+
+ in_irq = false;
+ return handled_one;
+ }
+
+ // Recursively mask a slot (mask the line on the 0 -> 1
+ // transition) without waiting for running handlers.
+ void InterruptController::rec_mask_nowait(InterruptSlot *slot)
+ {
+ AutoSpinLockIRQ autolock(lock);
+ u32 irq = get_irqnum(slot);
+
+ if (slot->mask_count++ == 0)
+ mask(irq);
+ }
+
+ // As rec_mask_nowait, but also wait until any in-progress run of
+ // the slot's handlers has finished.
+ void InterruptController::rec_mask(InterruptSlot *slot)
+ {
+ rec_mask_nowait(slot);
+ wait_for_irq(slot);
+ }
+
+ // Undo one level of rec_mask; on the final unmask, run the
+ // handler chain if an interrupt arrived while masked.
+ void InterruptController::rec_unmask(InterruptSlot *slot)
+ {
+ bool was_pending = false;
+ DroppableAutoSpinLockRecIRQ autolock(lock);
+
+ if (--slot->mask_count == 0) {
+ unmask(get_irqnum(slot));
+
+ if (slot->flags & InterruptSlot::Pending) {
+ was_pending = true;
+ slot->flags &= ~InterruptSlot::Pending;
+ slot->flags |= InterruptSlot::Running;
+ }
+ }
+
+ autolock.unlock();
+
+ if (was_pending) {
+ // Only really necessary for edge-triggered interrupts
+
+ for (Interrupt *i = slot->first_int; i; i = i->next)
+ i->action();
+
+ autolock.lock();
+ slot->flags &= ~InterruptSlot::Running;
+ autolock.unlock();
+ }
+ }
+
+ // Spin until the slot's handler chain is no longer running.
+ void InterruptController::wait_for_irq(InterruptSlot *slot)
+ {
+ while (slot->flags & InterruptSlot::Running)
+ ll_busywait();
+ }
+
+ bool in_irq;
+}
+
+//#include "irq-server/footer.cc"
--- /dev/null
+class IRQ.Interrupt : System.IO.Interrupts.Interrupt;
+class IRQ.InterruptController : System.IO.Interrupts.InterruptController;
--- /dev/null
// core/lock.cc -- Sleeping locks built on spinlocks and wait queues
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <util/lock.h>
+#include <kern/thread.h>
+
namespace Lock {
	// OPT: add inline, lockless uncontended case

	// FIXME: Allow a high-priority blocker to steal a lock from
	// a low-priority holder if the holder has not yet run. This
	// prevents rescheduling every iteration if both threads are
	// repeatedly acquiring and releasing the lock.

	// Acquire the sleeping lock, blocking the current thread if it is
	// held.  lockval holds the owning thread's pointer (as a ulong),
	// or 0 when free.
	void Lock::lock()
	{
		DroppableAutoSpinLockRecIRQ autolock(spinlock);

		// Fast path: lock is free, take ownership.
		if (!lockval) {
			lockval = reinterpret_cast<ulong>(curthread);
			return;
		}

		// Recursive acquisition is a bug, not supported.
		assert(lockval != reinterpret_cast<ulong>(curthread));

		// Queue ourselves before re-checking, so a concurrent
		// unlock() cannot miss us.
		Threads::ThreadBlocker blocker(curthread);
		waitqueue.block(&blocker);

		// If the lock was released meanwhile, take it and cancel
		// the block instead of sleeping.
		if (!lockval) {
			lockval = reinterpret_cast<ulong>(curthread);
			waitqueue.unblock(&blocker);
			return;
		}

		// Drop the spinlock before sleeping; unlock() hands the lock
		// directly to a queued blocker's thread before waking it.
		autolock.unlock();
		curthread->block(&blocker);

		// FIXME: interruptible locks
		assert(lockval == reinterpret_cast<ulong>(curthread));
	}

	// Release the lock.  If there are waiters, ownership is handed
	// directly to the highest-priority waiter (which is then woken);
	// otherwise the lock is marked free.
	void Lock::unlock()
	{
		AutoSpinLockRecIRQ autolock(spinlock);
		assert(lockval == reinterpret_cast<ulong>(curthread));

		if (waitqueue.empty()) {
			lockval = 0;
			return;
		}

		Threads::Blocker *b = waitqueue.unblock_one();
		lockval = reinterpret_cast<ulong>(b->thread);
		b->wake();
	}
}
--- /dev/null
// Non-zero while the kernel is handling a fault; presumably used to
// detect nested/recursive faults -- TODO confirm against the
// fault-handling code (not visible here).
int in_fault;
--- /dev/null
+// core/thread.cc -- Scheduler and thread creation/destruction
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/thread.h>
+#include <kern/pagealloc.h>
+#include <kern/time.h>
+#include <kern/arch.h>
+#include <kern/irq.h>
+#include <lowlevel/bitops.h>
+
+using namespace Lock;
+
+namespace Threads {
+ Thread *Sched::best_rt(int prio)
+ {
+ Util::List *rq = &rt_runqueue[prio];
+ assert(!rq->empty());
+
+ return rq->next->listentry(Thread, runqueue_node);
+ }
+
+ void Sched::replenish_all()
+ {
+ // All runnable tasks have depleted timeslices, so mark
+ // all depleted prios active. Replenishment will happen
+ // lazily.
+
+ last_replenish++;
+ ts_bitmap = ts_depleted_bitmap;
+ ts_depleted_bitmap = 0;
+ }
+
	// Replenish one priority level in O(1): the ts_depleted[prio] node
	// is a marker inside ts_runqueue[prio] separating active (before)
	// from depleted (after) threads.  Moving the marker to the end
	// makes every depleted thread active again at once.
	void Sched::replenish_prio(int prio)
	{
		// Move the depleted marker to the end, effectively moving
		// all other tasks to the active list at once.

		ts_depleted[prio].del();
		ts_runqueue[prio].add_back(&ts_depleted[prio]);
	}
+
	// Insert this (timeshared) thread into its priority's runqueue.
	// Note the marker trick: adding behind ts_depleted[prio] lands at
	// the end of the ACTIVE portion; adding behind ts_runqueue[prio]
	// lands at the end of the DEPLETED portion.
	void Thread::ts_add()
	{
		assert(runqueue_node.empty());

		if (time_left) {
			// This puts it at the end of the active list, not depleted.
			sched.ts_depleted[ts_prio].add_back(&runqueue_node);
		} else {
			sched.ts_runqueue[ts_prio].add_back(&runqueue_node);
		}

		// Bitmaps are MSB-first: bit (ts_prios - 1 - prio).
		sched.ts_bitmap |= 1 << (Sched::ts_prios - 1 - ts_prio);
	}

	// Remove this thread from its runqueue, clearing the active and/or
	// depleted bitmap bit when the respective portion becomes empty
	// (detected by the marker being adjacent to the list head).
	void Thread::ts_del()
	{
		runqueue_node.del();

		if (sched.ts_runqueue[ts_prio].next == &sched.ts_depleted[ts_prio])
			sched.ts_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));
		if (sched.ts_depleted[ts_prio].next == &sched.ts_runqueue[ts_prio])
			sched.ts_depleted_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));
	}

	// Move this thread from the active to the depleted portion of its
	// priority's runqueue (its timeslice has run out).
	void Thread::ts_deplete()
	{
		runqueue_node.del();

		if (sched.ts_runqueue[ts_prio].next == &sched.ts_depleted[ts_prio])
			sched.ts_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));

		// This puts it at the end of the depleted list, not active.
		sched.ts_runqueue[ts_prio].add_back(&runqueue_node);
		sched.ts_depleted_bitmap |= 1 << (Sched::ts_prios - 1 - ts_prio);
	}
+
	// Make this thread runnable.  Timeshared threads get a lazy
	// priority adjustment first; all threads set their bit in the
	// master RT bitmap (timeshared threads share RT priority 0).
	inline void Thread::add()
	{
		if (policy == TimeShared) {
			prio_adjust();
			ts_add();
		} else {
			sched.rt_runqueue[rt_prio].add_back(&runqueue_node);
		}

		ll_multiword_set_bit(sched.bitmap, Sched::rt_prios - 1 - rt_prio);
	}

	// Remove this thread from the runqueues, clearing the master
	// bitmap bit when its priority level becomes empty.  For
	// timeshared threads that means BOTH the active and depleted
	// bitmaps must be empty before bit 0's slot is cleared.
	inline void Thread::del()
	{
		if (policy == TimeShared) {
			ts_del();

			if (!sched.ts_bitmap && !sched.ts_depleted_bitmap)
				ll_multiword_clear_bit(sched.bitmap, Sched::rt_prios - 1);
		} else {
			runqueue_node.del();

			if (sched.rt_runqueue[rt_prio].empty())
				ll_multiword_clear_bit(sched.bitmap, Sched::rt_prios - 1 - rt_prio);
		}
	}
+
+ inline u32 Sched::prio_to_slice(int prio)
+ {
+ assert(prio >= 1 && prio < ts_prios);
+ return prio * default_timeslice / 8;
+ }
+
+ int Sched::slice_to_prio(u32 slice)
+ {
+ return slice * 8 / default_timeslice;
+ }
+
	// Lazily catch this (off-runqueue) thread up with replenish
	// generations it slept through, decaying the carried-over time and
	// recomputing its dynamic priority from the time it has left.
	void Thread::prio_adjust()
	{
		assert(runqueue_node.empty());

		if (last_replenish != sched.last_replenish) {
			if (sched.last_replenish - last_replenish > 3) {
				// Slept through many generations: saturate just
				// below the 2*time_slice cap.
				time_left = time_slice * 2 - 1;
			} else while (sched.last_replenish != last_replenish) {
				// Halve the carry-over and add one slice per
				// generation missed.
				time_left = time_left / 2 + time_slice;
				assert(time_left < (s32)time_slice * 2);
				last_replenish++;
			}

			last_replenish = sched.last_replenish;

			int new_prio = Sched::slice_to_prio(time_left);

			// NOTE(review): leftover debug output for an assert that
			// has evidently fired before; kept as-is.
			if (!(new_prio >= ts_static_prio && new_prio < Sched::ts_prios)) {
				printf("new prio %d, static %d, time left %d\n",
				       new_prio, ts_static_prio, time_left);
			}

			assert(new_prio >= ts_static_prio && new_prio < Sched::ts_prios);
			ts_prio = new_prio;
		}
	}
+
	// Replenish this (on-runqueue) thread's timeslice for the current
	// generation and drop any temporary priority boost back to the
	// static priority (requeueing at the new level).
	void Thread::replenish()
	{
		assert(!runqueue_node.empty());

		if (last_replenish != sched.last_replenish) {
			// On-queue threads only replenish once fully depleted.
			assert(time_left == 0);
			time_left = time_slice;
			last_replenish = sched.last_replenish;

			if (ts_static_prio != ts_prio) {
				// Boosts only ever raise the priority.
				assert(ts_static_prio < ts_prio);
				ts_del();
				ts_prio = ts_static_prio;
				ts_add();
			}
		}
	}
+
	// Pick the next timeshared thread: the head of the highest active
	// (non-depleted) priority queue, replenishing lazily as needed.
	Thread *Sched::best_ts()
	{
		// No active priorities at all: start a new generation.
		if (!ts_bitmap)
			replenish_all();

		assert(ts_bitmap);
		// Bitmap is MSB-first; convert bit position back to priority.
		int best = ts_prios - 1 - ll_ffs(ts_bitmap);
		assert(best >= 1 && best < ts_prios);

		// Marker at the head means the active portion is empty.
		if (ts_runqueue[best].next == &ts_depleted[best])
			replenish_prio(best);

		assert(ts_runqueue[best].next != &ts_depleted[best]);
		assert(!ts_runqueue[best].empty());
		Thread *t = ts_runqueue[best].next->listentry(Thread, runqueue_node);

		assert(!t->blocked_on);
		assert(t->policy == Thread::TimeShared);
		assert(t->rt_prio == 0);
		assert(t->ts_prio == best);

		// The replenish can lower the threads priority if it was boosted;
		// in some cases, this may mean that a different thread is now the
		// highest priority thread. As these aren't real-time threads, and
		// as priorities are mainly to determine which threads can take the
		// CPU immediately on wakeup rather than which CPU hog goes first,
		// it's not important enough to worry about; the new priority will
		// be used on the next schedule.

		t->replenish();
		return t;
	}
+
	// Core scheduling decision; caller holds runqueue_lock.  Charges
	// the outgoing thread for its CPU time, picks the best RT thread,
	// falling back to timeshared (RT prio 0) and finally the idle
	// (init) thread, then context-switches if the choice changed.
	void Sched::schedule_nolock()
	{
		assert(!IRQ::in_irq);
		need_resched = 0;
		// Highest set bit in the master bitmap, as an RT priority;
		// equals rt_prios when no thread is runnable at all.
		int rt = rt_prios - 1 - ll_multiword_ffs(bitmap, 0, rt_prios);
		Thread *best;

		Time::Time now;
		Time::monotonic_clock.get_time(&now);

		// The idle thread is never charged.
		if (curthread != Arch::init_thread)
			curthread->charge(now);

		if (rt == rt_prios)
			best = Arch::init_thread;
		else if (rt == 0)
			best = best_ts();
		else
			best = best_rt(rt);

		if (best != curthread) {
			if (best != Arch::init_thread) {
				// Preempt the incoming thread when its slice runs out.
				best->last_time = now;
				Time::Time expiry = now + best->time_left;
				resched_timer.arm(expiry);
			}

			Arch::switch_thread(best, curthread);
		}
	}
+
	// Public entry point: take the runqueue lock and reschedule.
	void Sched::schedule()
	{
		AutoSpinLockRecIRQ autolock(runqueue_lock);
		schedule_nolock();
	}

	// First code run in a newly created thread's context: release the
	// runqueue lock that was held across the switch into it.
	void Sched::sched_new_thread()
	{
		runqueue_lock.unlock_irq();
	}
+
+ Thread *Sched::new_thread(thread_func func, void *arg, char *name)
+ {
+ // Allocate a page for the thread's stack, and stick the thread
+ // struct at the top of the stack. It's placed at the top rather
+ // than the bottom to facilitate the possible eventual use of guard
+ // pages.
+
+ Mem::Page *page = Mem::PageAlloc::alloc(1);
+ ulong addr = reinterpret_cast<ulong>(Mem::page_to_kvirt(page));
+ addr += Arch::ArchThread::size - thread_size;
+
+ Thread *t = reinterpret_cast<Thread *>(addr);
+
+ // Use placement new to run the constructors of lists
+ // (and any other self-constructing type that may be
+ // added).
+
+ new(t) Thread;
+
+ // FIXME: settable schedparams
+
+ t->time_left = Sched::default_timeslice;
+ t->policy = Thread::TimeShared;
+ t->rt_prio = 0;
+ t->ts_static_prio = 8;
+ t->ts_prio = 8;
+ t->time_slice = prio_to_slice(t->ts_prio);
+ t->blocked_on = NULL;
+ t->last_replenish = 0;
+ t->addr_space = NULL;
+ t->active_addr_space = NULL;
+
+ t->arch.init(reinterpret_cast<void *>(func), arg);
+
+ if (name)
+ strncpy(t->name, name, Thread::name_len);
+
+ threadlist_lock.lock_irq();
+ threadlist.add_back(&t->threadlist_node);
+ threadlist_lock.unlock_irq();
+
+ return t;
+ }
+
	// Timer callback: request a reschedule (checked on the way out of
	// interrupt context).  The timer argument is unused.
	static void do_resched_timer(Time::KTimerEntry *timer)
	{
		need_resched = 1;
	}

	// One-time scheduler initialization, run on the boot (idle)
	// thread.  Sets up the preemption timer and plants each priority's
	// depleted marker in its runqueue.
	void Sched::init()
	{
		assert(curthread == Arch::init_thread);

		resched_timer.mux = Time::monotonic_timers;
		resched_timer.func = do_resched_timer;
		resched_timer.data = NULL;

		// Marker at the front: the active portion starts empty.
		for (int i = 0; i < ts_prios; i++)
			ts_runqueue[i].add_front(&ts_depleted[i]);

		strcpy(curthread->name, "idle thread");
	}
+
	// Tear down a thread: unlink it from the global thread list and
	// free its stack page (recovered by masking the Thread pointer
	// down to its page boundary; see new_thread for the layout).
	Thread::~Thread()
	{
		sched.threadlist_lock.lock_irq();
		threadlist_node.del();
		sched.threadlist_lock.unlock_irq();

		ulong addr = reinterpret_cast<ulong>(this);
		addr &= ~(Arch::ArchThread::size - 1);
		Mem::Page *page = Mem::kvirt_to_page(reinterpret_cast<void *>(addr));
		Mem::PageAlloc::free(page, 1);
	}

	// Thread termination stub -- real cleanup is not implemented yet,
	// so just report and spin forever.
	void Thread::exit()
	{
		printf("thread %p exiting...\n", this);
		for(;;);
	}
+
	// Block the current thread on "blocker".  If the blocker was
	// already satisfied (wake() ran first), this is a no-op; otherwise
	// the thread leaves the runqueue and we reschedule.
	void Thread::block(ThreadBlocker *blocker)
	{
		AutoSpinLockRecIRQ autolock(sched.runqueue_lock);
		assert(!runqueue_node.empty());
		assert(!blocked_on);
		assert(!IRQ::in_irq);

		if (blocker->blocked) {
			blocked_on = blocker;
			del();
			assert(runqueue_node.empty());
			sched.schedule_nolock();
		}
	}
+
	// Make this thread runnable again; caller holds runqueue_lock.
	// Idempotent: does nothing if the thread is already queued.
	void Thread::wake_nolock()
	{
		blocked_on = NULL;

		if (runqueue_node.empty()) {
			add();
			need_resched = 1;
		}
	}

	// Locked wake, rescheduling immediately if interrupts are enabled
	// (i.e. we are not inside an interrupt/atomic region).
	void Thread::wake()
	{
		ulong irq = sched.runqueue_lock.lock_recirq();
		wake_nolock();
		sched.runqueue_lock.unlock_recirq(irq);

		if (ll_get_int_state() && need_resched)
			sched.schedule();
	}
+
	// Charge this thread for CPU time used since last_time, clamping
	// its remaining slice at zero.  A depleted, still-runnable thread
	// is moved to the depleted list (timeshared) or round-robined to
	// the back of its queue (RT round-robin; FIFO is never rotated).
	void Thread::charge(Time::Time &now)
	{
		Time::Time ival;
		ival = now - last_time;
		last_time = now;

		// An interval of a second or more trivially exhausts any
		// slice; avoids having to scale nanos by seconds.
		if (ival.seconds != 0) {
			time_left = 0;
		} else {
			time_left -= ival.nanos;

			if (time_left < 0)
				time_left = 0;
		}

		if (!blocked_on && time_left == 0 && policy != FIFO) {
			if (policy == TimeShared) {
				ts_deplete();
			} else {
				del();
				add();
			}
		}
	}
+
	// Point this thread at a new address space and activate it in
	// hardware.  Interrupts are disabled around the update so the two
	// fields and the hardware state stay consistent.
	void Thread::set_aspace(Mem::AddrSpace *aspace)
	{
		// FIXME: lock thread against scheduling; this temporary method should
		// be gone before SMP anyway.

		ll_ints_off();
		addr_space = active_addr_space = aspace;
		Arch::set_aspace(aspace);
		ll_ints_on();
	}
+
	// Satisfy the blocker and wake its thread.
	void ThreadBlocker::wake()
	{
		blocked = false;
		thread->wake();
	}

	// Cascade: satisfying this blocker satisfies and wakes the one it
	// forwards to.
	void CascadeBlocker::wake()
	{
		blocked = false;
		blocker->wake();
	}

	// Satisfy the blocker without waking anyone (used when the waiter
	// never actually slept).
	void ThreadBlocker::unblock()
	{
		blocked = false;
	}

	void CascadeBlocker::unblock()
	{
		blocked = false;
		blocker->unblock();
	}
+
	// Append a blocker to the queue.  The caller still has to block
	// the thread itself (see Thread::block).
	void WaitQueue::block(Blocker *blocker)
	{
		AutoSpinLockRecIRQ autolock(lock);
		blockers.add_back(&blocker->list_node);
	}

	// Remove a specific blocker from the queue (e.g. when the waiter
	// got the resource without sleeping).
	void WaitQueue::unblock(Blocker *blocker)
	{
		AutoSpinLockRecIRQ autolock(lock);
		blocker->list_node.del();
	}

	// True if no blockers are queued.
	bool WaitQueue::empty()
	{
		AutoSpinLockRecIRQ autolock(lock);
		return blockers.empty();
	}
+
	// Remove and return the highest-priority blocker (RT priority
	// first; ties at RT prio 0 broken by timeshared priority), or NULL
	// if the queue is empty.  Caller holds the queue lock; the
	// runqueue lock is taken so the priorities read here are stable.
	Blocker *WaitQueue::unblock_one_nolock()
	{
		AutoSpinLock schedautolock(sched.runqueue_lock);

		int best_rt = -1;
		int best_ts = 0;
		Blocker *best = NULL;

		for (Util::List *node = blockers.next; node != &blockers;
		     node = node->next)
		{
			Blocker *b = node->listentry(Blocker, list_node);
			Thread *t = b->thread;

			// Strictly higher RT priority always wins.
			if (best_rt < t->rt_prio) {
				best_rt = t->rt_prio;
				best_ts = t->ts_prio;
				best = b;
			}

			// Among timeshared threads (RT prio 0), prefer the
			// higher timeshared priority.
			if (best_rt == t->rt_prio && best_rt == 0 && best_ts < t->ts_prio) {
				best_ts = t->ts_prio;
				best = b;
			}
		}

		if (best)
			best->list_node.del();

		return best;
	}

	// Locked wrapper around unblock_one_nolock().
	Blocker *WaitQueue::unblock_one()
	{
		AutoSpinLockRecIRQ autolock(lock);
		return unblock_one_nolock();
	}
+
	// Dequeue and wake the best blocker, then reschedule if needed.
	// NOTE(review): always returns false, even when a blocker was
	// woken -- looks like it should return (best != NULL); confirm
	// what callers expect before changing.
	bool WaitQueue::wake_one()
	{
		// The lock is held over the wake to make sure that we still
		// have a valid reference to best. For external calls to
		// unblock_one(), the caller must use its own locks (or other
		// mechanisms) to ensure that the blocker is still valid.

		ulong irq = lock.lock_recirq();
		Blocker *best = unblock_one_nolock();

		if (best)
			best->wake();

		lock.unlock_recirq(irq);

		if (ll_get_int_state() && need_resched)
			sched.schedule();

		return false;
	}
+
	// Unblock and wake every queued blocker; returns how many were
	// woken.  Reschedules afterwards if interrupts are enabled.
	int WaitQueue::wake_all()
	{
		ulong irq = lock.lock_recirq();
		sched.runqueue_lock.lock();
		int count = 0;

		// NOTE(review): b->list_node.del() runs before the loop
		// advances via node->next -- this is only safe if
		// Util::List::del() leaves the node's next pointer intact;
		// confirm against the list implementation.
		for (Util::List *node = blockers.next; node != &blockers;
		     node = node->next)
		{
			Blocker *b = node->listentry(Blocker, list_node);
			b->unblock();
			b->thread->wake_nolock();
			b->list_node.del();
			count++;
		}

		sched.runqueue_lock.unlock();
		lock.unlock_recirq(irq);

		if (ll_get_int_state() && need_resched)
			sched.schedule();

		return count;
	}
+
	// The single system-wide scheduler instance.
	Sched sched;
+}
+
// C-linkage wrappers, called from low-level (assembly) code.

// Entry point reached when a thread's function returns.
extern "C" void exit_thread()
{
	curthread->exit();
	assertl(0, Assert::Always);
}

// First code run in a new thread (releases the runqueue lock).
extern "C" void sched_new_thread()
{
	Threads::sched.sched_new_thread();
}

extern "C" void schedule()
{
	Threads::sched.schedule();
}

// Set when a scheduling decision is pending; checked on the way back
// from interrupts and after wakeups.
int need_resched;
--- /dev/null
+// core/time.cc -- Clocks and timers
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/arch.h>
+#include <kern/time.h>
+#include <lowlevel/clock.h>
+#include <System/Exceptions.h>
+
+namespace Time {
+ using namespace Lock;
+
	// Read the monotonic clock: the time recorded at the last tick
	// plus the low-level clock cycles elapsed since, normalized so
	// nanos stays below one second.
	void MonotonicClock::get_time(Time *time)
	{
		if (!time)
			throw_idl(InvalidArgument, 0, nullarray);

		AutoSpinLockRecIRQ autolock(lock);
		s32 nanos = get_nanos(ll_getclock() - last_tick_llclock);

		*time = last_tick_time;
		time->nanos += nanos;

		while (time->nanos >= 1000000000) {
			time->nanos -= 1000000000;
			time->seconds++;
		}
	}

	// Report clock resolution: one low-level clock period, floored at
	// 1 ns for clocks faster than 1 GHz.
	void MonotonicClock::get_resolution(Time *res)
	{
		if (!res)
			throw_idl(InvalidArgument, 0, nullarray);

		res->seconds = 0;

		if (llclocks_per_second >= 1000000000)
			res->nanos = 1;
		else
			res->nanos = 1000000000 / llclocks_per_second;
	}
+
	// Periodic tick: advance the (llclock, time) reference pair by
	// whole tick increments -- catching up if ticks were missed --
	// then arm the timer for the next tick.
	void MonotonicClock::tick()
	{
		unsigned long irq = lock.lock_recirq();

		while (ll_getclock() - last_tick_llclock > llclocks_per_tick) {
			last_tick_llclock += llclocks_per_tick;
			last_tick_time += clock_tick_increment;
		}

		Time next_tick = last_tick_time + clock_tick_increment;
		lock.unlock_recirq(irq);

		// Arm outside the lock; the timer mux has its own locking.
		tick_timer.arm(next_tick);
	}
+
	// Timer-factory method; not implemented yet.
	// NOTE(review): "*timer = NULL" assigns into the caller-provided
	// Timer -- presumably Timer is an IDL-generated reference/pointer
	// type here; confirm against the generated header.
	void MonotonicClock::new_timer(Timer *timer)
	{
		if (!timer)
			throw_idl(InvalidArgument, 0, nullarray);

		// FIXME
		*timer = NULL;
		return;
	}

	// Trampoline from KTimerEntry to the owning clock's tick().
	void MonotonicClock::timer_tick(KTimerEntry *entry)
	{
		MonotonicClock *clock = static_cast<MonotonicClock *>(entry->data);
		clock->tick();
	}

	// Start the periodic tick: zero the time base at the current
	// low-level clock value, arm the first tick, and let the arch
	// start its timer hardware.
	void MonotonicClock::init_tick_timer()
	{
		tick_timer.mux = monotonic_timers;
		tick_timer.func = timer_tick;
		tick_timer.data = this;

		last_tick_llclock = ll_getclock();
		last_tick_time.seconds = 0;
		last_tick_time.nanos = 0;

		Time next_tick = clock_tick_increment;
		tick_timer.arm(next_tick);

		Arch::timer_init();
	}
+
+ void HWTimer::get_expiry(Time *EXPIRY, uint8_t *ARMED)
+ {
+ if (EXPIRY)
+ *EXPIRY = expiry;
+ if (ARMED)
+ *ARMED = armed;
+ }
+
	// Global time infrastructure: the system monotonic clock, the
	// timer multiplexer driving software timers, and the (arch-
	// provided) hardware timer behind it.
	MonotonicClock monotonic_clock;
	TimerMux *monotonic_timers;
	HWTimer *hw_timer;

	// One-time init; hw_timer must already be set by arch code.
	void init()
	{
		hw_timer->calibrate();
		monotonic_timers = new TimerMux(monotonic_clock, *hw_timer);
		monotonic_clock.init_tick_timer();
	}
+}
+
+#include <servers/core/time/footer.cc>
--- /dev/null
+class Time.MonotonicClock : System.Time.Clock, System.Time.TimerFactory;
+class Time.HWTimer : System.Time.Timer;
--- /dev/null
+This directory is for "private" kernel interfaces. They may be used
+by user code, but are more likely to change or disappear than the
+standard System interfaces. Use of these in user code should be
+limited to debugging and low-level code (where it is accepted that it
+will probably need to change to accommodate new OS versions).
--- /dev/null
+#ifndef _ARCH_ADDRS_H
+#define _ARCH_ADDRS_H
+
+#define KERNEL_START 0xffffffff80000000
+#define PHYSMEM_START 0xffff800000000000
+
+#ifdef __cplusplus
+#include <kern/types.h>
+
+// GCC can't currently handle full 64-bit code relocations
+// on x64, so the code is mapped separately from the
+// all-of-RAM mapping. This means that there are two different
+// kernel-virtual addresses for some physical addresses.
+
+namespace Arch {
+ static inline ulong kvirt_to_phys(void *addr)
+ {
+ ulong ret = reinterpret_cast<ulong>(addr);
+
+ if (ret > KERNEL_START)
+ return ret - KERNEL_START;
+ else
+ return ret - PHYSMEM_START;
+ }
+
+ static inline void *phys_to_kvirt(ulong addr)
+ {
+ return reinterpret_cast<void *>(addr + PHYSMEM_START);
+ }
+
+ namespace Priv {
+ static const ulong max_ktext_map = 4 * 1024 * 1024;
+
+ static inline void *phys_to_ktext(ulong addr)
+ {
+ return reinterpret_cast<void *>(addr + KERNEL_START);
+ }
+ }
+};
+
+#endif
+#endif
--- /dev/null
+#ifndef _ARCH_CONF_H
+#define _ARCH_CONF_H
+
+#endif
--- /dev/null
+// This file should only be included by include/kern/thread.h.
+
+#ifndef _ARCH_CURRENT_H
+#define _ARCH_CURRENT_H
+
namespace Arch {
	static inline Threads::Thread *get_current_thread() __attribute__((const));

	// Locate the current Thread from the stack pointer: the stack is
	// one page, and the Thread struct sits at the top of that page
	// (see Sched::new_thread), so masking rsp down to the page
	// boundary and adding the fixed offset yields the Thread.
	static inline Threads::Thread *get_current_thread()
	{
		u64 ret;

		// Round rsp down to the 4KiB page boundary.
		asm("movq %%rsp, %0;"
		    "andq $~0xfff, %0" :
		    "=r" (ret));

		ret += ArchThread::size - Threads::thread_size;

		return reinterpret_cast<Threads::Thread *>(ret);
	}
}
+
+#endif
--- /dev/null
+#ifndef _ARCH_MEM_H
+#define _ARCH_MEM_H
+
+#include <kern/types.h>
+#include <arch/paging.h>
+
+extern int _end;
+
+namespace Mem {
+ class PageAllocZone;
+};
+
namespace Arch {
	// First and last+1 pages of memory, as page numbers. Small holes are
	// ignored; large holes will require discontiguous memory support.

	static const size_t mem_start = 0;
	extern size_t mem_end;

	// Zone boundaries (as page numbers): above 4GiB, and above 16MiB
	// (ISA DMA limit).
	static const uintptr_t highzonestart = 4UL*1024*1024*1024 / page_size;
	static const uintptr_t dma32zonestart = 16*1024*1024 / page_size;

	extern Mem::PageAllocZone *pagezones[];
	extern Mem::PageAllocZone **pagezonelists[3];

	// Bump pointer for boot-time allocations, before the real page
	// allocator is up.
	extern uintptr_t next_free_bootmem;

	namespace Priv {
		// One x86/x64 GDT segment descriptor (legacy 8-byte format).
		struct Descriptor {
			u16 limit_low;
			u16 base_low;
			u8 base_mid;
			u8 type:4;
			u8 user:1;
			u8 dpl:2;
			u8 present:1;
			u8 limit_high:4;
			u8 sw:1;
			u8 code64:1;
			u8 opsize:1;
			u8 gran:1;
			u8 base_high;
		};

		extern Mem::PageAllocZone isadmazone, dma32zone, highzone;

		void early_adjust_mappings();

		// Call after mem_end is set.
		void map_physmem();
	}
}
+
+extern Arch::Priv::Descriptor x64_gdt[1024];
+
+#endif
--- /dev/null
+#include <arch-x86-common/multiboot.h>
--- /dev/null
+#ifndef _ARCH_PAGETABLE_H
+#define _ARCH_PAGETABLE_H
+
+#include <lowlevel/misc.h>
+#include <lowlevel/atomic.h>
+
namespace Arch {
	// One x64 page-table entry (4-level paging), with bitfield and
	// raw-word views plus the helpers the generic pagetable code
	// expects (addr/flag conversion, installation, TLB shootdown).
	union PTE {
		typedef ulong Word;

		struct {
			Word Valid:1;
			Word Writeable:1;
			Word User:1;
			Word WriteThrough:1;
			Word CacheDisable:1;
			Word Accessed:1;
			Word Dirty:1;
			Word PageAttrTable:1;
			Word Global:1;
			Word Avail1:3;
			Word Addr:40;
			Word Avail2:11;
			Word NoExec:1;
		};

		Word raw;

		PTE(Word init) : raw(init)
		{
		}

		PTE() : raw(0)
		{
		}

		operator Word()
		{
			return raw;
		}

		// Index into one table level for a virtual address.
		static Word addr_to_offset(Word addr, int shift)
		{
			int pages_per_table = page_size / sizeof(Word);
			return (addr >> shift) & (pages_per_table - 1);
		}

		// Physical frame address held by this entry (bits 12..51).
		Word pte_to_addr()
		{
			return raw & 0x000ffffffffff000;
		}

		static PTE addr_to_pte(Word phys)
		{
			return phys & 0x000ffffffffff000;
		}

		// Convert generic PTE flags (+ mask of which flags to apply)
		// into a hardware (flags, mask) pair.
		// NOTE(review): here maskout starts all-ones and masked bits
		// are CLEARED -- the opposite convention from the x86
		// version; presumably the arch pagetable code applies it
		// accordingly; confirm before unifying.
		static void flags_to_pte(Mem::PTEFlags flagsin,
		                         Mem::PTEFlags maskin,
		                         PTE &flagsout,
		                         PTE &maskout)
		{
			maskout = ~0UL;
			flagsout = 0;

			if (maskin.Valid) {
				maskout.Valid = 0;
				flagsout.Valid = flagsin.Valid;
			}

			if (maskin.Writeable) {
				maskout.Writeable = 0;
				flagsout.Writeable = flagsin.Writeable;
			}

			// FIXME: check for and enable NX support
			if (0 && maskin.Executable) {
				maskout.NoExec = 0;
				flagsout.NoExec = !flagsin.Executable;
			}

			if (maskin.User) {
				maskout.User = 0;
				flagsout.User = flagsin.User;
			}
		}

		// Convert hardware bits back to generic PTE flags.  x64 pages
		// are always readable when valid; executability is the
		// inverse of NoExec.
		Mem::PTEFlags pte_to_flags()
		{
			Mem::PTEFlags ret = 0;

			ret.Valid = Valid;
			ret.User = User;

			if (Valid) {
				ret.Readable = 1;
				ret.Writeable = Writeable;
				ret.Executable = !NoExec;
			}

			return ret;
		}

		// Plain store of this entry into a table slot.
		void set_pte(PTE *table, uint offset)
		{
			table[offset] = raw;
		}

		// Atomic exchange; returns the previous entry (so callers can
		// see old Dirty/Accessed bits).
		PTE xchg_pte(PTE *table, uint offset)
		{
			return ll_xchg_long(reinterpret_cast<Word *>(&table[offset]), raw);
		}

		bool valid_pte()
		{
			return Valid;
		}

		bool dirty_pte()
		{
			return Dirty;
		}

		static void invalidate_tlb_entry(Word addr)
		{
			ll_invalidate_tlb_entry(addr);
		}

		// Table geometry: 4 levels, 9 address bits per level, with
		// the upper half of each top-level table reserved for the
		// kernel.
		enum {
			page_size = Arch::page_size,
			page_shift = Arch::page_shift,

			pages_per_table = page_size / sizeof(Word),
			num_levels = 4,
			shift_per_level = 9,

			// All "upper" addresses belong to the kernel.

			kmap_start = 256,
			kmap_end = 511
		};
	};
}
+
+#endif
--- /dev/null
+#ifndef _ARCH_PAGING_H
+#define _ARCH_PAGING_H
+
+#include <System/Mem.h>
+
+#include <kern/types.h>
+#include <arch/addrs.h>
+
+namespace Mem {
+ union PTEFlags;
+};
+
namespace Arch {
	// Basic x64 paging parameters: 4KiB pages.
	static const ulong page_size = 4096;
	static const int page_shift = 12;

	// x64 does not have cache alias issues.
	static const ulong page_mapping_min_align = 4096;

	// User virtual address range (lower canonical half).
	static const ulong user_start = 0;
	static const ulong user_end = 0x00007fffffffffff;
}
+
+#endif
--- /dev/null
+#ifndef _ARCH_THREAD_H
+#define _ARCH_THREAD_H
+
+#include <kern/types.h>
+
+namespace Threads {
+ class Thread;
+}
+
namespace Arch {
	// Per-thread arch state: the saved stack/frame pointers and a flag
	// telling the context switch to jump to the new-thread entry path.
	struct ArchThread {
		void *rsp, *rbp;
		u8 jump_to_init;

		// Stack (+ Thread struct) allocation size; must stay a power
		// of two -- get_current_thread() masks rsp by it.
		enum {
			size = 4096
		};

		void init(void *entry, void *arg);
	};

	// Low-level context switch (implemented in assembly).
	void switch_thread(Threads::Thread *dest, Threads::Thread *src);

	namespace Priv {
		// x64 Task State Segment layout (64-bit format, offset by a
		// leading padding word so fields line up).
		struct TSS {
			u32 padding;
			u32 reserved1;
			u64 rsp[3];
			u64 reserved2;
			u64 ist[7];
			u16 reserved3[5];
			u16 iomap_base;
		};

		extern TSS tss;
	}
};
+
+#endif
--- /dev/null
+#ifndef _ARCH_X86C_MULTIBOOT_H
+#define _ARCH_X86C_MULTIBOOT_H
+
+#include <kern/types.h>
+
namespace Arch {
namespace Priv {
namespace MultiBoot {
	// Boot information block passed by a Multiboot-compliant loader.
	// All pointers are 32-bit physical addresses.
	struct BootInfo {
		// Bit positions for "flags", indicating which fields below
		// are valid.
		enum {
			flag_mem,
			flag_boot_dev,
			flag_cmdline,
			flag_modules,
			flag_aout_syms,
			flag_elf_syms,
			flag_mmap,
			flag_drives,
			flag_config,
			flag_bootloader_name,
			flag_apm,
			flag_vbe
		};

		u32 flags;
		u32 mem_lower, mem_upper;
		u32 boot_device;
		u32 cmdline;
		u32 mods_count, mods_addr;
		// Symbol table info: a.out and ELF variants overlap.
		union {
			struct {
				u32 tabsize;
				u32 strsize;
				u32 addr;
				u32 reserved;
			} aout;
			struct {
				u32 num, size, addr, shndx;
			} elf;
		};
		u32 mmap_length, mmap_addr;
	};

	// One boot module entry (mods_addr points at an array of these).
	struct Module {
		u32 start, end;
		u32 str;
		u32 reserved;
	};

	// One memory-map entry.  Note: "size" is the entry size excluding
	// the size field itself, hence the packed layout.
	struct MemMap {
		u32 size;
		u64 base;
		u64 len;

		// Possible values for type:
		enum {
			Available = 1,
			Reserved,
			ACPIReclaim,
			ACPINVS
		};

		u32 type;
	} __attribute__((packed));

	// Parse the boot info and hand memory regions to the allocator.
	void process_info();
}
}
}
+
+#endif
--- /dev/null
+#ifndef _ARCH_ADDRS_H
+#define _ARCH_ADDRS_H
+
+#define PHYSMEM_START 0x80000000
+#define KERNEL_START PHYSMEM_START
+
+#ifdef __cplusplus
+#include <kern/types.h>
+
namespace Arch {
	// x86 has a single kernel mapping: all of (direct-mappable) RAM
	// at PHYSMEM_START, which doubles as the kernel text mapping.
	static inline ulong kvirt_to_phys(void *addr)
	{
		return reinterpret_cast<ulong>(addr) - PHYSMEM_START;
	}

	static inline void *phys_to_kvirt(ulong addr)
	{
		return reinterpret_cast<void *>(addr + PHYSMEM_START);
	}

	namespace Priv {
		// Only the first max_ktext_map bytes of the kernel image are
		// mapped at boot.
		static const ulong max_ktext_map = 4 * 1024 * 1024;
	}

	// Allow some room for these regions to grow if needed
	// (8 pages each should be more than sufficient).
	static const ulong roshared_map = 0x7fff0000;
	static const ulong rwshared_map = 0x7fff8000;

	// User stack region, just below the shared mappings.
	static const ulong stack_bottom = 0x7f000000;
	static const ulong stack_top = 0x7ffeffff;
	static const bool stack_grows_up = false;
};
+
+#endif
+#endif
--- /dev/null
+#ifndef _ARCH_CONF_H
+#define _ARCH_CONF_H
+
+#endif
--- /dev/null
+// This file should only be included by include/kern/thread.h.
+
+#ifndef _ARCH_CURRENT_H
+#define _ARCH_CURRENT_H
+
namespace Arch {
	static inline Threads::Thread *get_current_thread() __attribute__((const));

	// Locate the current Thread from the stack pointer: the stack is
	// one page with the Thread struct at its top, so mask esp down to
	// the page boundary and add the fixed offset (x86 counterpart of
	// the x64 version).
	static inline Threads::Thread *get_current_thread()
	{
		u32 ret;

		// Round esp down to the 4KiB page boundary.
		asm("movl %%esp, %0;"
		    "andl $~0xfff, %0" :
		    "=r" (ret));

		ret += ArchThread::size - Threads::thread_size;

		return reinterpret_cast<Threads::Thread *>(ret);
	}
}
+
+#endif
--- /dev/null
+#ifndef _ARCH_MEM_H
+#define _ARCH_MEM_H
+
+#include <kern/types.h>
+#include <arch/paging.h>
+#include <kernel/region.h>
+
+extern int _end;
+
+namespace Mem {
+ class PageAllocZone;
+};
+
namespace Arch {
	// First and last+1 pages of memory, as page numbers. Small holes are
	// ignored; large holes will require discontiguous memory support.

	static const size_t mem_start = 0;
	extern u64 mem_end;

	// Bump pointer for boot-time allocations, before the real page
	// allocator is up.
	extern uintptr_t next_free_bootmem;

	namespace Priv {
		// Highzone and DMA32Zone aren't currently used, but will be once high
		// memory support is added (PAE support will also be required for
		// Highzone). KZone only goes up to 1.5GiB, to allow 512 MiB for
		// kernel virtual mappings.

		static const int num_zones = 2;

		// Physical byte ranges covered by each zone, in allocation-
		// preference order.
		static const System::Mem::Region mem_zone_regions[num_zones] = {
			{ 0, 16*1024*1024 - 1 },              // ISA DMA zone
			{ 16*1024*1024, 1536*1024*1024 - 1 }, // Kernel direct mappable (KZone)
#if 0
			{ 1536*1024*1024, 4ULL*1024*1024*1024 - 1 },       // DMA32Zone
			{ 4ULL*1024*1024*1024, 36ULL*1024*1024*1024 - 1}   // HighZone
#endif
		};

		extern Mem::PageAllocZone pagezones[num_zones];
	}

	// Per-zone fallback lists handed to the page allocator.
	extern Mem::PageAllocZone **pagezonelists[Priv::num_zones];

	namespace Priv {
		// One x86 GDT segment descriptor (8-byte format).
		struct Descriptor {
			u16 limit_low;
			u16 base_low;
			u8 base_mid;
			u8 type:4;
			u8 user:1;
			u8 dpl:2;
			u8 present:1;
			u8 limit_high:4;
			u8 sw:1;
			u8 reserved:1;
			u8 opsize:1;
			u8 gran:1;
			u8 base_high;
		};

		void early_adjust_mappings();

		// Call after mem_end is set.
		void map_physmem();
	}
}
+
+extern Arch::Priv::Descriptor x86_gdt[1024];
+
+#endif
--- /dev/null
+#include <arch-x86-common/multiboot.h>
--- /dev/null
+// FIXME: PAE support
+
+#ifndef _ARCH_PAGETABLE_H
+#define _ARCH_PAGETABLE_H
+
+#include <lowlevel/misc.h>
+#include <lowlevel/atomic.h>
+
namespace Arch {
	// One 32-bit x86 (non-PAE) page table entry, overlaid on its raw
	// hardware representation.  Bits 9-11 are ignored by the MMU and are
	// used here as software bits (FaultOnWrite, PermWrite, Avail).
	union PTE {
		typedef ulong PhysAddr, VirtAddr;
		// With two-level x86 paging, directory entries share the leaf
		// entry layout.
		typedef PTE DirPTE;

		struct {
			PhysAddr Valid:1;
			PhysAddr Writeable:1; // Low-level write access:
			                      // equivalent to PermWrite && !FaultOnWrite
			PhysAddr User:1;
			PhysAddr WriteThrough:1;
			PhysAddr CacheDisable:1;
			PhysAddr Accessed:1;
			PhysAddr Dirty:1;
			PhysAddr PageAttrTable:1;
			PhysAddr Global:1;
			PhysAddr FaultOnWrite:1; // software: copy-on-write pending
			PhysAddr PermWrite:1; // High-level permission-based write access
			PhysAddr Avail:1; // software: currently unused
			PhysAddr Addr:20; // physical page frame number
		};

		PhysAddr raw;

		// Wrap a raw hardware PTE value.
		PTE(PhysAddr init) : raw(init)
		{
		}

		// An empty (not-present) entry.
		PTE() : raw(0)
		{
		}

		operator PhysAddr()
		{
			return raw;
		}

		// Table index for addr at the level whose low bit is "shift"
		// within the virtual address.
		static uint addr_to_offset(VirtAddr addr, int shift)
		{
			int pages_per_table = page_size / sizeof(PhysAddr);
			return (addr >> shift) & (pages_per_table - 1);
		}

		// Physical frame address of this entry (flag bits cleared).
		PhysAddr pte_to_addr()
		{
			return raw & 0xfffff000;
		}

		// Entry containing only the frame address; flags must be
		// merged in separately.
		static PTE addr_to_pte(PhysAddr phys)
		{
			return phys & 0xfffff000;
		}

		// DirPTE only
		void *get_table()
		{
			return static_cast<PTE *>(phys_to_kvirt(pte_to_addr()));
		}

		// DirPTE only.  7 == Valid | Writeable | User; per-page
		// protection is enforced at the leaf level.
		static DirPTE set_table(void *addr)
		{
			return addr_to_pte(kvirt_to_phys(addr)) | 7;
		}

		// Translate a generic flags/mask pair (Mem::PTEFlags) into the
		// corresponding hardware flags/mask pair.
		static void flags_to_pte(Mem::PTEFlags flagsin,
		                         Mem::PTEFlags maskin,
		                         PTE &flagsout,
		                         PTE &maskout)
		{
			maskout = 0;
			flagsout = 0;

			if (maskin.Valid) {
				maskout.Valid = 1;
				flagsout.Valid = flagsin.Valid;
			}

			if (maskin.FaultOnWrite) {
				maskout.FaultOnWrite = 1;
				flagsout.FaultOnWrite = flagsin.FaultOnWrite;
			}

			if (maskin.Writeable) {
				// PermWrite records the logical permission; the
				// hardware Writeable bit is further gated by
				// FaultOnWrite (here and in set_flags()).
				maskout.Writeable = 1;
				maskout.PermWrite = 1;
				flagsout.Writeable = flagsin.Writeable;
				flagsout.PermWrite = flagsin.Writeable;

				// This must be done here if both mask bits are set,
				// as we may not be going through set_flags in that case.

				if (maskin.FaultOnWrite)
					flagsout.Writeable &= !flagsout.FaultOnWrite;
			}

			if (maskin.User) {
				maskout.User = 1;
				flagsout.User = flagsin.User;
			}
		}

		// Atomically apply (raw & ~mask) | flags to this entry and
		// return the previous entry.  The hardware Writeable bit is
		// re-derived so it is never set while FaultOnWrite is set.
		PTE set_flags(PTE mask, PTE flags)
		{
			PTE ret, new_pte;

			do {
				ret = raw;
				new_pte = (raw & ~mask) | flags;
				new_pte.Writeable &= !new_pte.FaultOnWrite;
			} while (!ll_cmpxchg_long(reinterpret_cast<PhysAddr *>
				(this), ret, new_pte));

			return ret;
		}

		// Convert back to generic flags.  x86-32 has no per-page
		// read/exec disable, so Readable and Executable mirror Valid.
		Mem::PTEFlags pte_to_flags()
		{
			Mem::PTEFlags ret = 0;

			ret.Valid = Valid;
			ret.User = User;

			if (Valid) {
				ret.Readable = 1;
				ret.Writeable = PermWrite;
				ret.Executable = 1;
				ret.FaultOnWrite = FaultOnWrite;
			}

			return ret;
		}

		// Non-atomic store of this entry into table[offset].
		void set_pte(PTE *table, uint offset)
		{
			table[offset] = raw;
		}

		// Atomic exchange: store this entry, return the old one.
		PTE xchg_pte(PTE *table, uint offset)
		{
			return ll_xchg_long(reinterpret_cast<PhysAddr *>(&table[offset]), raw);
		}

		bool valid_pte()
		{
			return Valid;
		}

		bool dirty_pte()
		{
			return Dirty;
		}

		// Table geometry constants used by the generic page table code.
		enum {
			page_size = Arch::page_size,
			page_shift = Arch::page_shift,

			pages_per_table = page_size / sizeof(PhysAddr),
			num_levels = 2,
			shift_per_level = 10,

			// All "upper" addresses belong to the kernel.

			kmap_start = 512,
			kmap_end = 1023
		};
	};

	// Invalidate the TLB entry covering addr on this CPU.
	static inline void invalidate_tlb_entry(ulong addr)
	{
		ll_invalidate_tlb_entry(addr);
	}
}
+
+#endif
--- /dev/null
+#ifndef _ARCH_PAGING_H
+#define _ARCH_PAGING_H
+
+#include <kern/types.h>
+
namespace Arch {
	// 4 KiB hardware pages (non-PAE x86).
	static const ulong page_size = 4096;
	static const int page_shift = 12;

	// x86 does not have cache alias issues.
	static const ulong page_mapping_min_align = 4096;

	// User virtual address range: the low 2 GiB.
	static const ulong user_start = 0;
	static const ulong user_end = 0x7fffffff;
}
+
+#endif
--- /dev/null
+#ifndef _ARCH_THREAD_H
+#define _ARCH_THREAD_H
+
+#include <kern/types.h>
+
namespace Threads {
	class Thread;
}

namespace Mem {
	class AddrSpace;
}

namespace Arch {
	// Architecture-specific per-thread state.  Lives in the thread's
	// 4 KiB kernel stack page (see arch/current.h).
	struct ArchThread {
		void *esp, *ebp;  // saved kernel stack/frame pointers
		// NOTE(review): flag presumably consumed on the first switch
		// into the thread -- confirm semantics in the switch code.
		u8 jump_to_init;

		enum {
			// Combined size of the kernel stack page, including
			// this structure and the generic Thread record.
			size = 4096
		};

		// Prepare a fresh thread so that switching to it enters
		// entry(arg).
		void init(void *entry, void *arg);
	};

	// Context-switch the CPU from src to dest.
	void switch_thread(Threads::Thread *dest, Threads::Thread *src);

	// Make aspace's page tables current on this CPU.
	void set_aspace(Mem::AddrSpace *aspace);

	namespace Priv {
		// Hardware x86 Task State Segment layout.  Only esp0/ss0 are
		// needed for ring 3 -> ring 0 stack switching; the rest is the
		// architecturally-defined format.
		struct TSS {
			u16 prev_task;
			u16 reserved1;
			u32 esp0;
			u16 ss0;
			u16 reserved2;
			u32 esp1;
			u16 ss1;
			u16 reserved3;
			u32 esp2;
			u16 ss2;
			u16 reserved4;
			u32 cr3, eip, eflags;
			u32 eax, ecx, edx, ebx, esp, ebp, esi, edi;
			u16 es;
			u16 reserved5;
			u16 cs;
			u16 reserved6;
			u16 ss;
			u16 reserved7;
			u16 ds;
			u16 reserved8;
			u16 fs;
			u16 reserved9;
			u16 gs;
			u16 reserved10;
			u16 ldt;
			u16 reserved11;
			u16 trap;
			u16 iomap_base;
		};

		// The system TSS (defined in arch code).
		extern TSS tss;
	}
};
+
+#endif
--- /dev/null
+#ifndef _ARCH_USERCOPY_H
+#define _ARCH_USERCOPY_H
+
+#include <kern/types.h>
+#include <kern/libc.h>
+
namespace Arch {
	// Accessors for moving data across the user/kernel boundary.
	// FIXME: these currently trust the pointer and access memory
	// directly; fault-safe user access still needs to be implemented.

	// Read one value from userspace and return it.
	template <typename T>
	static inline T copyin(T *ptr)
	{
		// FIXME
		return *ptr;
	}

	// Read one value from userspace into data.
	template <typename T>
	static inline void copyin(T *ptr, T &data)
	{
		// FIXME
		data = *ptr;
	}

	// Read count values from userspace into the data array.
	template <typename T>
	static inline void copyin(T *ptr, T *data, int count)
	{
		// FIXME
		memcpy(data, ptr, sizeof(T) * count);
	}

	// Write one value to userspace.
	template <typename T>
	static inline void copyout(T *ptr, T &data)
	{
		// FIXME
		*ptr = data;
	}

	// Write count values from the data array to userspace.
	template <typename T>
	static inline void copyout(T *ptr, T *data, int count)
	{
		// FIXME
		memcpy(ptr, data, sizeof(T) * count);
	}
}
+
+#endif
--- /dev/null
+#include <kern/assert.h>
--- /dev/null
+#include <arch/addrs.h>
--- /dev/null
+#ifndef _KERN_ARCH_H
+#define _KERN_ARCH_H
+
+#include <kern/thread.h>
+
namespace Arch {
	// Architecture entry points invoked by generic startup code.
	void arch_init();
	void timer_init();
	// Initial kernel thread; presumably what the boot context becomes
	// -- see arch init code.
	extern ::Threads::Thread *init_thread;
	// Context-switch the CPU from src to dest.
	void switch_thread(Threads::Thread *dest, Threads::Thread *src);
}
+
+#endif
--- /dev/null
+#ifndef _KERN_ASSERT_H
+#define _KERN_ASSERT_H
+
+#include <kern/conf.h>
+#include <kern/libc.h>
+
namespace Assert
{
	// Report a failed assertion's location, then trap the CPU.
	// Never returns.
	static inline __attribute__((noreturn))
	void assert_failure(const char *file, int line)
	{
		printf("Assertion failure at %s:%d\n", file, line);
		__builtin_trap();
	}
}

// assert() is a normal-severity assertl(); levels and the assertl()
// machinery come from util/assert.h.
#include <util/assert.h>
#define assert(cond) assertl(cond, Assert::Normal)
+
+#endif
--- /dev/null
+#include <lowlevel/bitops.h>
--- /dev/null
+#ifndef _KERN_COMPILER_H
+#define _KERN_COMPILER_H
+
+#include <kern/types.h>
+#include <stddef.h>
+
// Branch-prediction hints.  __builtin_expect is only available on
// GCC 3 and later; on older compilers they expand to the bare condition.
#if __GNUC__ >= 3
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x) (x)
#define unlikely(x) (x)
#endif
+
+#endif
--- /dev/null
+#ifndef _KERN_CONF_H
+#define _KERN_CONF_H
+
+// FIXME: Autogenerate, with dependencies tracked on specific options,
+// and the ability to test options in makefiles
+
+#include <arch/conf.h>
+
// CONF_option: General options

// Build with symmetric multiprocessing support.
#define CONF_SMP false

// CONF_IO_option: Devices and I/O

// Use the legacy i8259 PIC interrupt controller driver.
#define CONF_IO_IRQ_I8259 true

// CONF_MEM_option: Memory management

// At most 2^max_page_alloc_bits pages can be allocated
// at a time.  The default of 10 gives a maximum allocation
// of 4 MiB with a page size of 4 KiB.

#define CONF_MEM_MAX_PAGE_ALLOC_BITS 10

// CONF_DEBUG_option: Debugging

// Enable spinlock sanity checking.
#define CONF_DEBUG_SPINLOCKS 1

// _UTIL_option: options for the util header library

// Assertion verbosity level consumed by util/assert.h.
#define _UTIL_ASSERT_LEVEL 1
+
+#endif
--- /dev/null
+#ifndef _KERN_CONSOLE_H
+#define _KERN_CONSOLE_H
+
+#include <kern/kernel.h>
+
namespace IO {
namespace Console {
	// Abstract kernel console device.
	class Console {
	public:
		virtual ~Console()
		{
		}

		// Write buf to the console.  NOTE(review): *len presumably
		// returns the number of octets written -- confirm against
		// implementations.
		virtual void write(Array<octet> buf, u64 *len) = 0;
		// Convenience wrapper for plain character buffers.
		void write(char *data, size_t len);
	};

	// Console used for kernel output.
	extern Console *primary_console;
}
}
+
+#endif
--- /dev/null
+#ifndef _KERN_EVENT_H
+#define _KERN_EVENT_H
+
+#include <System/Events.h>
+
namespace Event {
	using System::VStruct;
	using System::Events::Event;
	using namespace System::Notifiers;

	// Kernel-side receiver of event notifications.  The IDL-generated
	// interface glue is spliced in via the #include inside the class.
	class EventTrigger {
	public:
		#include <servers/core/event/Event/EventTrigger.h>

		EventTrigger()
		{
			init_iface();
		}

		virtual ~EventTrigger()
		{
		}

		// Deliver a notification; info carries notification-specific data.
		virtual void notify(VStruct *info) = 0;
	};

	// Synchronous trigger that simply forwards notifications to a
	// wrapped EventTrigger.
	class SyncEventTrigger {
		EventTrigger *real_trigger;

	public:
		#include <servers/core/event/Event/SyncEventTrigger.h>

		SyncEventTrigger(EventTrigger *real)
		{
			real_trigger = real;
			init_iface();
		}

		void notify(VStruct *info)
		{
			real_trigger->notify(info);
		}
	};

	// Wires events up to the triggers that should fire for them.
	class EventDispatcher {
	public:
		#include <servers/core/event/Event/EventDispatcher.h>

		EventDispatcher()
		{
			init_iface();
		}

		void setup_trigger(Event *event, Notifier *trigger);
		void setup_sync_trigger(Event *event, SyncNotifier *trigger);

		// In-kernel callback type for kfunc events.
		typedef void (*KFunc)(VStruct *info);

		void setup_kfunc_event(KFunc func, SyncNotifier *trigger);
	};
}
+
+#include <servers/core/event/footer.cc>
+
+#endif
--- /dev/null
+// Generic 64-bit PTE, mainly useful for non-process aspaces on 32-bit hosts,
+// so that files, disks, etc. can address >4GiB. 64-bit hosts will probably
+// want to use Arch::PTE instead, especially if direct copying between page
+// tables of similar PTEs is implemented. 32-bit embedded systems may also
+// wish to use Arch::PTE (for both less code and smaller page tables) if all
+// storage can be addressed with 32 bits.
+
+#ifndef _KERN_GENERIC_PTE_H
+#define _KERN_GENERIC_PTE_H
+
+#include <lowlevel/misc.h>
+#include <lowlevel/atomic.h>
+
+namespace Mem {
	// Directory-level entry for the generic page table: simply a
	// kernel-virtual pointer to the next-level table (NULL if absent).
	struct GenDirPTE {
		typedef u64 VirtAddr;
		typedef ulong PhysAddr;

		void *raw;

		GenDirPTE() : raw(NULL)
		{
		}

		GenDirPTE(void *RAW) : raw(RAW)
		{
		}

		// Table index for addr at the level whose low bit is "shift"
		// within the virtual address.
		static uint addr_to_offset(VirtAddr addr, int shift)
		{
			int pages_per_table = Arch::page_size / sizeof(void *);
			return (addr >> shift) & (pages_per_table - 1);
		}

		void *get_table()
		{
			return raw;
		}

		static GenDirPTE set_table(void *addr)
		{
			return GenDirPTE(addr);
		}

		// An entry is valid iff it points at a table.
		bool valid_pte()
		{
			return raw;
		}

		// Non-atomic store of this entry into table[offset].
		void set_pte(GenDirPTE *table, uint offset)
		{
			table[offset] = raw;
		}

		enum {
			// One pointer per slot, so each directory level resolves
			// page_shift - log2(sizeof(long)) bits of the address.
			shift_per_level = Arch::page_shift - _LL_LONG_LOGBYTES,
		};
	};
+
	// Generic 64-bit leaf PTE.  The low flag bits are laid out to match
	// Mem::PTEFlags exactly (see the "kept in sync" note in
	// include/kern/mem.h), which is what makes pte_to_flags() a simple
	// mask below.
	union GenPTE {
		typedef u64 VirtAddr, PhysAddr;
		typedef GenDirPTE DirPTE;

		struct {
#ifdef BITFIELD_LE
			PhysAddr Valid:1;
			PhysAddr Writeable:1;
			PhysAddr Readable:1;
			PhysAddr Executable:1;
			PhysAddr User:1;
			PhysAddr Accessed:1;
			PhysAddr Dirty:1;
			PhysAddr FaultOnWrite:1;
			PhysAddr Addr:56;
#elif defined(BITFIELD_BE)
			PhysAddr Addr:56;
			PhysAddr FaultOnWrite:1;
			PhysAddr Dirty:1;
			PhysAddr Accessed:1;
			PhysAddr User:1;
			PhysAddr Executable:1;
			PhysAddr Readable:1;
			PhysAddr Writeable:1;
			PhysAddr Valid:1;
#else
#error Unspecified/unrecognized bitfield endianness
#endif
		};

		PhysAddr raw;

		// Wrap a raw entry value.
		GenPTE(PhysAddr init) : raw(init)
		{
		}

		// An empty (invalid) entry.
		GenPTE() : raw(0)
		{
		}

		operator PhysAddr()
		{
			return raw;
		}

		// Table index for addr at the level whose low bit is "shift"
		// within the virtual address.
		static uint addr_to_offset(VirtAddr addr, int shift)
		{
			int pages_per_table = page_size / sizeof(PhysAddr);
			return (addr >> shift) & (pages_per_table - 1);
		}

		// Address portion of this entry (flag bits cleared).
		PhysAddr pte_to_addr()
		{
			return raw & ~((u64)page_size - 1);
		}

		// Entry containing only the address; flags merged in separately.
		static GenPTE addr_to_pte(PhysAddr phys)
		{
			return phys & ~((u64)page_size - 1);
		}

		// Generic flags are already in hardware^Wstored format here,
		// so the translation is a straight copy.
		static void flags_to_pte(Mem::PTEFlags flagsin,
		                         Mem::PTEFlags maskin,
		                         GenPTE &flagsout,
		                         GenPTE &maskout)
		{
			flagsout = (PhysAddr)flagsin;
			maskout = (PhysAddr)maskin;
		}

		// Returns (raw & ~mask) | flags.  NOTE(review): unlike
		// Arch::PTE::set_flags, this does not store into the entry and
		// returns the *new* value, not the old -- confirm the generic
		// pagetable code expects exactly this.
		GenPTE set_flags(GenPTE mask, GenPTE flags)
		{
			return (raw & ~mask) | flags;
		}

		// The low flag bits are bit-compatible with Mem::PTEFlags.
		Mem::PTEFlags pte_to_flags()
		{
			return raw & (page_size - 1);
		}

		// Non-atomic store of this entry into table[offset].
		void set_pte(GenPTE *table, uint offset)
		{
			table[offset] = raw;
		}

		// Non-atomic exchange (generic page tables are not walked by
		// hardware, so lock-based protection suffices).
		GenPTE xchg_pte(GenPTE *table, uint offset)
		{
			GenPTE old = table[offset];
			table[offset] = raw;
			return old;
		}

		bool valid_pte()
		{
			return Valid;
		}

		bool dirty_pte()
		{
			// FIXME: decide how to handle stacked dirty pages
			return false;
		}

		// Table geometry: 8-byte entries, enough levels to cover a
		// full 64-bit virtual address.
		enum {
			page_size = Arch::page_size,
			page_shift = Arch::page_shift,

			pages_per_table = page_size / sizeof(PhysAddr),
			shift_per_level = page_shift - 3,
			num_levels = 2 + (64 - page_shift - shift_per_level - 1) /
			             DirPTE::shift_per_level,

			// kmap is ignored for stacked aspaces
			kmap_start = 0, kmap_end = 0
		};
	};
+}
+
+#endif
--- /dev/null
+#ifndef _KERN_I8259_H
+#define _KERN_I8259_H
+
+#include <kern/irq.h>
+
namespace IRQ {
	// Driver for the legacy cascaded dual-i8259 PIC (16 IRQ lines).
	class I8259 : public InterruptController {
		// Shadow copies of each PIC's interrupt mask register
		// (index 0 = master, 1 = slave).
		u8 cached_masks[2];
		InterruptSlot irqslots[16];

		// Write the cached mask for PIC "off" out to the hardware.
		void output_mask(int off);
		void mask(u32 irq);
		void unmask(u32 irq);
		void mask_and_ack(u32 irq);
		int get_pending_irq();

	public:
		// Program the PICs and register the 16 slots.
		void init();
	};

	extern I8259 i8259;
}
+
+#endif
--- /dev/null
+#ifndef _KERN_IO_H
+#define _KERN_IO_H
+
+#include <lowlevel/io.h>
+
+#endif
--- /dev/null
+#ifndef _KERN_IRQ_H
+#define _KERN_IRQ_H
+
+#include <kern/kernel.h>
+#include <util/spinlock.h>
+
+#include <System/IO/Interrupts.h>
+
+namespace IRQ {
+ using namespace Lock;
+
+ typedef System::IO::Interrupts::Interrupt IInterrupt;
+ typedef System::IO::Interrupts::UserInterrupt IUserInterrupt;
+ typedef System::IO::Interrupts::InterruptController IInterruptController;
+ using System::IO::Bus::Device;
+
+ struct Interrupt;
+ class InterruptController;
+
	// Per-IRQ-line state kept by an InterruptController.
	struct InterruptSlot {
		InterruptController *controller;
		Interrupt *first_int; // head of the chain of handlers sharing this line
		int mask_count;       // recursive mask depth (see rec_mask/rec_unmask)

		// NOTE(review): presumed meanings -- confirm in irq.cc:
		// Pending: an instance arrived while masked; Running: a handler
		// for this line is currently executing.
		static const u32 Pending = 0x00000001;
		static const u32 Running = 0x00000002;

		u32 flags;
	};
+
	// Abstract interrupt controller.  Concrete drivers (e.g. I8259)
	// supply the mask/ack primitives; this class implements handler
	// chaining, recursive masking, and dispatch.
	class InterruptController {
		// When this function returns, any irq instances that were running
		// when the function was entered will have completed.  If not
		// masked, though, there could be new instances of the IRQ running
		// when this function exits.

		void wait_for_irq(InterruptSlot *slot);

	protected:
		SpinLock lock;

		// The derived constructor must initialize these.
		InterruptSlot *irqs;
		u32 num_irqs;

		virtual void mask_and_ack(u32 irq) = 0;
		virtual void mask(u32 irq) = 0;
		virtual void unmask(u32 irq) = 0;

		// Returns the highest-priority pending, unmasked interrupt, or
		// negative if no pending unmasked interrupts.

		virtual int get_pending_irq() = 0;

	public:
//		#include "core/irq-server/IRQ/InterruptController.h"

		virtual ~InterruptController()
		{
		}

		// Attach/detach a handler on a slot's sharing chain.
		void request_int(InterruptSlot *slot, Interrupt *irq);
		void free_int(Interrupt *irq);

		// If irq is non-negative, then the given IRQ number is the IRQ
		// source (this is a hack for x86 and x64, which provide this
		// information via the entry point used).  Otherwise,
		// get_pending_irq() will be called repeatedly (after handling
		// whatever interrupt was returned on the previous iteration) until
		// it returns negative.  This returns true if any of the handlers
		// called returned true.

		bool handle_irq(int irq);

		// Recursively request masking/unmasking of an interrupt.

		void rec_mask(InterruptSlot *slot);
		void rec_unmask(InterruptSlot *slot);

		// Like rec_mask, but does not wait for the completion of any pending
		// or running handlers.

		void rec_mask_nowait(InterruptSlot *slot);

		// Slot lookup by IRQ number; asserts the number is in range.
		InterruptSlot *get_slot(u32 irq)
		{
			assert(irq < num_irqs);
			return &irqs[irq];
		}

		// Inverse of get_slot(): the IRQ number of a slot.
		u32 get_irqnum(InterruptSlot *slot)
		{
			assert(slot - irqs >= 0 && slot - irqs < (int)num_irqs);
			return slot - irqs;
		}
	};
+
	// One interrupt handler.  Handlers sharing an IRQ line are chained
	// through "next" on the line's InterruptSlot.
	struct Interrupt {
		Interrupt *next;
		InterruptController *controller;
		InterruptSlot *slot;
		Device device;

		virtual ~Interrupt()
		{
		}

		// Returns true if there was an interrupt condition present
		// on the device; false otherwise.  This is used to detect
		// stuck interrupts.

		virtual bool action() = 0;

#if 0
		#include "core/irq-server/IRQ/Interrupt.h"

		Interrupt()
		{
			init_iface();
		}
#endif

		// Report the device this handler belongs to.
		void get_device(Device *dev)
		{
			if (dev)
				*dev = device;
		}

		// Report the owning controller.  Not yet implemented (the
		// disabled code below shows the intent).
		void get_controller(IInterruptController *con)
		{
//			if (con)
//				*con = parent;
		}

		// Report this handler's IRQ number on its controller.
		void get_num(u32 *irqnum)
		{
			if (irqnum)
				*irqnum = controller->get_irqnum(slot);
		}
	};
+
+ // FIXME: per-CPU
+ extern bool in_irq;
+}
+
+#endif
--- /dev/null
+// Generic stuff that pretty much everything is going to include.
+
+#ifndef _KERN_KERNEL_H
+#define _KERN_KERNEL_H
+
+#include <orb.h>
+#include <System/Objects.h>
+#include <System/Exceptions.h>
+
+#include <kern/types.h>
+#include <kern/conf.h>
+#include <kern/assert.h>
+#include <lowlevel/misc.h>
+
+using System::RunTime::countarray;
+using System::RunTime::Array;
+using System::RunTime::nullarray;
+using System::Object;
+using System::Objects::Factory;
+using namespace System::Exceptions::Std;
+
+// When set, bypass (or use alternate) console locks and halt on any
+// further faults. FIXME: should be per-CPU.
+
+extern int in_fault;
+
+#endif
--- /dev/null
+#ifndef _KERN_LIBC_H
+#define _KERN_LIBC_H
+
+#include <kern/types.h>
+#include <stdarg.h>
+
+size_t vsnprintf(char *buf, size_t size, const char *str, va_list args);
+size_t snprintf(char *buf, size_t size, const char *str, ...)
+__attribute__((format(printf, 3, 4)));
+size_t sprintf(char *buf, const char *str, ...)
+__attribute__((format(printf, 2, 3)));
+size_t printf(const char *str, ...)
+__attribute__((format(printf, 1, 2)));
+
+// These are C-ABI so libgcc and libsupc++ can use them.
+extern "C" {
+ void *memcpy(void *dest, const void *src, size_t len);
+ void *memmove(void *dest, const void *src, size_t len);
+ int memcmp(const void *b1, const void *b2, size_t len);
+ void *memset(void *b, int ch, size_t len);
+
+ size_t strnlen(const char *s, size_t n);
+ size_t strlen(const char *s);
+
+ char *strcpy(char *dest, const char *src);
+ char *strncpy(char *dest, const char *src, size_t len);
+
+ void bzero(void *b, size_t len);
+
+ void *malloc(size_t size);
+ void free(void *ptr);
+ void abort();
+}
+
+void run_ctors();
+
// Placement new operators

// Standard placement new/new[]: construct an object at a
// caller-supplied address; no allocation is performed.

inline void *operator new(size_t len, void *addr)
{
	return addr;
}

inline void *operator new[](size_t len, void *addr)
{
	return addr;
}
+
+#endif
--- /dev/null
+#include <util/list.h>
--- /dev/null
+#ifndef _KERN_LOCK_H
+#define _KERN_LOCK_H
+
+#include <kern/kernel.h>
+#include <kern/spinlock.h>
+#include <kern/thread.h>
+#include <util/spinlock.h>
+
namespace Lock {
	// NOTE(review): presumably set during panic/shutdown -- confirm.
	extern int dying;

	// Sleeping mutual-exclusion lock: contended acquirers block on the
	// waitqueue instead of spinning.
	struct Lock {
		SpinLock spinlock;
		Threads::WaitQueue waitqueue;
		// 0 when free; otherwise identifies the owning thread (it
		// compares equal to curthread cast to ulong -- see
		// held_by_curthread()).
		ulong lockval;

		Lock()
		{
			lockval = 0;
		}

		void lock();
		void unlock();

		// True iff the calling thread currently owns this lock.
		bool held_by_curthread()
		{
			return lockval == reinterpret_cast<ulong>(curthread);
		}
	};
}
+
+#endif
--- /dev/null
+#ifndef _KERN_MEM_H
+#define _KERN_MEM_H
+
+#include <System/Mem.h>
+
+#include <kern/kernel.h>
+#include <arch/mem.h>
+#include <arch/addrs.h>
+
+#include <util/rbtree.h>
+#include <util/list.h>
+#include <util/lock.h>
+#include <kernel/region.h>
+#include <lowlevel/bitops.h>
+
+namespace Mem {
+ // Used for allocating memory at boot time before the page allocator
+ // is running. Alignment must be a power of 2. Because nothing other
+ // than the kernel is guaranteed to be mapped from the beginning on
+ // all architectures, no generic code should use this until after
+ // Arch::arch_init() has run and set up physical memory mappings.
+ //
+ // This function may not be used after the page allocator has
+ // been initialized by architecture code.
+ //
+ // Architectures must provide Arch::next_free_bootmem initalized
+ // to the first free piece of bootmem.
+
+ static inline void *get_bootmem(size_t size, size_t align)
+ {
+ uintptr_t ret = (Arch::next_free_bootmem + align - 1) & ~(align - 1);
+ Arch::next_free_bootmem = ret + size;
+ return reinterpret_cast<void *>(ret);
+ }
+
+ typedef System::Mem::AddrSpace IAddrSpace;
+ typedef System::Mem::Mappable IMappable;
+ using System::Mem::Cacheable;
+ using System::Mem::Region;
+ using System::Mem::RegionWithOffset;
+ using System::Mem::AllocFlags;
+ using System::Mem::MapFlags;
+ using System::Mem::AccessFlags;
+
	// Architecture-independent page flags, overlaid on a single ulong.
	// Each Arch::PTE/GenPTE converts to and from this format.
	union PTEFlags {
		struct {
			// This must be kept in sync with include/kern/generic-pte.h

#ifdef BITFIELD_LE
			// Readable, Writeable, and Executable are for permission only,
			// not for implementing copy on write, swapping, etc.

			ulong Valid:1;
			ulong Writeable:1;
			ulong Readable:1;
			ulong Executable:1;
			ulong User:1;
			ulong Accessed:1;
			ulong Dirty:1;

			// If set, then on a write access, the page is copied and this
			// address space gets the new, anonymous version.  The rmap list
			// is then traversed; all downstream mappings will share the new
			// copy.
			//
			// For vareas that directly map something other than an address
			// space, the action to be taken on a write fault is
			// mapping-specific.

			ulong FaultOnWrite:1;

			// VArea Only:
			// Do not allow the user to unmap or modify flags.
			// Used for the shared user/kernel mappings.

			ulong Protected:1;

#elif defined(BITFIELD_BE)
			ulong pad:_LL_LONG_BYTES * 8 - 9;
			ulong Protected:1;
			ulong FaultOnWrite:1;
			ulong Dirty:1;
			ulong Accessed:1;
			ulong User:1;
			ulong Executable:1;
			ulong Readable:1;
			ulong Writeable:1;
			ulong Valid:1;
#else
#error Unspecified/unrecognized bitfield endianness
#endif
		};

		ulong raw;

		// Wrap a raw flag word.
		PTEFlags(ulong init) : raw(init)
		{
		}

		// All flags clear.
		PTEFlags() : raw(0)
		{
		}

		operator ulong()
		{
			return raw;
		}
	};
+
+ using Arch::kvirt_to_phys;
+ using Arch::phys_to_kvirt;
+ class PageTable;
+ class AddrSpace;
+
+ struct VirtualArea;
+ typedef Util::RBTree<VirtualArea, Region, u64> VirtualAreaTree;
+
	// Anything that can be mapped into an address space (an aspace
	// itself, a file, a device, ...).
	class Mappable {
	protected:
		// This linked list keeps track of the virtual areas that map this
		// mappable (this is not transitive; vareas that map a varea that
		// maps this mappable are not on this list).
		//
		// OPT: rbtree keyed on mapped address range?

		Util::List mappings;
		Lock::SpinLock mappings_lock;

	public:
		// True iff this mappable is an ASpaceMappable wrapping an
		// AddrSpace (set by the ASpaceMappable constructor).
		bool is_aspace;

		// Total size of the mappable, in bytes.
		virtual void get_size(u64 *size) = 0;

		// Minimum mapping granularity; one hardware page by default.
		virtual void get_block_size(u64 *block_size)
		{
			*block_size = Arch::page_size;
		}

		// Register/unregister varea as mapping this mappable.

		virtual void map(VirtualArea *varea);
		virtual void unmap(VirtualArea *varea);

		// Make the specified page available for mapping.  This must be
		// done before map() will succeed.  It is possible (though
		// unlikely) that the pages will be removed before map() is called,
		// causing map() to return false.  In such a case, pagein should be
		// called again by the fault handler.  If the mapping fails for
		// other reasons (such as lack of permission, a hole in a stacked
		// aspace, or an I/O error) then pagein() will throw a BadPageFault
		// exception.

		virtual void pagein(u64 vaddr, PTEFlags reqflags) = 0;

		// Returns the physical address and flags associated with a given
		// virtual address.  If flags.Valid is not set, then phys and all
		// other flags are undefined, and pagein() should be retried.
		// rmap_lock must be held.

		virtual void get_entry(u64 vaddr, u64 *phys, PTEFlags *flags) = 0;

		#include <servers/mem/addrspace/Mem/Mappable.h>

		Mappable()
		{
			init_iface();
			is_aspace = false;
		}

		virtual ~Mappable()
		{
		}
	};
+
	// One contiguous mapped region of an address space.
	struct VirtualArea {
		AddrSpace *aspace;

		// The red/black tree is used to find a region based on address.
		//
		// The linked list is kept in order and is used to iterate over
		// vmas in a region (after looking up the starting point in the
		// tree, unless the region is the entire address space).

		VirtualAreaTree::Node rbtree_node;
		Util::List list_node;     // in-order list of the aspace's vareas
		Util::List mappings_node; // links this varea on ma->mappings

		PTEFlags flags;
		Mappable *ma;             // what this varea maps

		// This is added to the virtual address to get the offset
		// into the mappable.
		s64 offset;

		// The virtual region covered, stored as the rbtree node's key.
		Region &region()
		{
			return rbtree_node.value;
		}
	};
+
	// One node in a reverse-map (rmap) chain: records which varea maps a
	// given virtual address, with head/tail links for the chain.

	// If the padded size of this changes, update rmap_shift.
	// and the alignment check in RMapTable::unmap.

	// If the layout of this changes, update the offsets below.
	union RMapNode {
		struct {
			u64 vaddr;
			VirtualArea *va;
			Util::ListNoAutoInit head, tail;
		};

		// Pads the node to 8 longs so nodes are power-of-2 sized.
		long pad[8];

		// Byte offsets of head/tail within the struct, used to recover
		// the node from an embedded list link.
		enum {
			head_offset = sizeof(u64) + sizeof(void *),
			tail_offset = head_offset + sizeof(void *) * 2,
		};
	};
+
+ // This lock protects the rmap chains and rmap tables. It also makes
+ // atomic the PageTable::get_entry, RMapTable::map, PageTable::map
+ // sequence.
+ //
+ // OPT: This lock is acquired on all map/unmap activity; if/when this
+ // turns out to be a significant bottleneck, finer-grained locking can
+ // be used. I decided against doing it now because it would be
+ // somewhat complicated (but I believe do-able) to avoid all races,
+ // and I'd like to move on to implementing other things for now.
+
+ extern Lock::Lock rmap_lock;
+ class Page;
+
	// Per-aspace table of RMapNodes, indexed by virtual address, used to
	// find all downstream mappings of a page (see rmap_lock above).
	class RMapTable {
		void *toplevel;
		int toplevel_shift;

		// Look up (optionally creating) the node for virtaddr.
		RMapNode *get_rmap(u64 virtaddr, bool add = false);

	public:
		RMapTable();

		// rmap_lock must be held.
		static void map(VirtualArea *downstream_va, PageTable *upstream_ptbl,
		                u64 virtaddr, u64 upstream_vaddr);

		void unmap(u64 virtaddr);

		// Handle a copy-on-write for the specified page and all downstream
		// mappings.  All such mappings are set to the new page, and
		// FaultOnWrite is cleared.

		void break_copy_on_write(u64 virtaddr, Page *new_page);
	};
+
	// Abstract page table for one address space.  Concrete subclasses
	// are presumably instantiated over Arch::PTE (process aspaces) or
	// GenPTE (stacked aspaces) -- confirm in the implementation.
	class PageTable {
	public:
		void *toplevel;
		RMapTable rmap_table;
		// True for hardware process page tables; false for stacked
		// (generic) aspaces.
		const bool is_process;

		typedef Mem::PTEFlags Flags;
		typedef System::Mem::Region Region;
		typedef System::Mem::RegionWithOffset RegionWithOffset;

		PageTable(bool process) : is_process(process)
		{
		}

		virtual ~PageTable()
		{
		}

		// Region is virtual, offset is physical
		virtual void map(RegionWithOffset region, Flags flags) = 0;
		virtual void unmap(Region region) = 0;

		// Sets the flags which are set in mask to their value in flags.
		// Flags not set in mask are untouched.

		virtual void set_flags(Region region, Flags flags, Flags mask) = 0;

		// Returns the physical address and flags associated with a given
		// virtual address.  If flags.Valid is not set, then phys and all
		// other flags are undefined.  This function is mainly used for
		// propagating stacked aspace PTEs.

		virtual void get_entry(u64 vaddr, u64 *phys, Flags *flags) = 0;

		virtual void get_size(u64 *size) = 0;

		// This is called when a PTE is replaced.  It handles refcounting,
		// dirty page queueing, and TLB invalidation.  vaddr is only
		// valid for process address spaces, so it doesn't need to be
		// 64-bit (except on 64-bit hardware, of course).  When it is
		// known that only flags are changing, set no_release so that
		// the page refcount is not decremented.

		void kill_pte(ulong vaddr, u64 physaddr, bool dirty, bool valid,
		              bool no_release = false);
	};
+
	// Thrown by pagein() when a fault cannot be resolved (lack of
	// permission, a hole in a stacked aspace, or an I/O error).
	struct BadPageFault {
	};
+
	// Adapter that exposes an AddrSpace as a Mappable, so address
	// spaces can be stacked on one another.
	class ASpaceMappable : public Mappable {
		AddrSpace *aspace;

		// Recursive helper for pagein() across stacked aspaces.
		static bool rec_pagein(AddrSpace *aspace, u64 vaddr,
		                       PTEFlags reqflags);

	public:
		ASpaceMappable (AddrSpace *ASPACE) : aspace(ASPACE)
		{
			is_aspace = true;
		}

		void get_size(u64 *size);

		// Unexported
		virtual void pagein(u64 vaddr, PTEFlags reqflags);
		virtual void get_entry(u64 vaddr, u64 *phys, PTEFlags *flags);

		friend class AddrSpace;
	};
+
	// An address space: a tree/list of vareas plus the page table that
	// backs them.  Process aspaces use hardware page tables; non-process
	// (stacked) aspaces can cover arbitrary 64-bit ranges.
	class AddrSpace {
		// OPT: Coalesce vareas when possible (except when setting flags to
		// match surrounding vareas, as the flags are likely to change
		// again if they've already changed).

		// OPT: A subclass of AddrSpace that doesn't use
		// VirtualArea::offset, but rather has its own virtual method that
		// figures out offsets to the next level using its own data
		// structures (such as filesystem block tables).  This would avoid
		// excessive vareas for fragmented files.  Whether the excess of
		// vareas is significant enough for this to be worthwhile remains
		// to be seen.

		VirtualAreaTree varea_tree;
		Util::List varea_list;
		Lock::Lock lock;
		bool is_process;

		// This defines the start and end of the aspace; mappings outside
		// this range may not be done, and will not be returned by
		// get_free_region().  For process aspaces, this goes from
		// Arch::user_start to Arch::user_end.  For non-proc aspaces, this
		// can be anything.

		Region aspace_region;

		// Returns true if there is a mapped region that overlaps the given
		// region.  If there is a collision, then the first overlapping
		// varea is returned in va.  Otherwise, it returns the last mapped
		// area before the region in va (if there are no areas, or the
		// region is before the first area, then prev is NULL).  The aspace
		// lock must be held.

		bool check_overlap(Region region, VirtualArea *&va);

		// Finds a free region of the requested length and puts it in
		// region.  Returns true if an appropriate area is found.  The prev
		// pointer is as in check_overlap.  The aspace lock must be held.

		bool get_free_region(ulong len, Region &region, VirtualArea *&prev);

		// This is the value after the last region returned by
		// get_free_region.  If there was an intervening unmap for a lower
		// address, then it is set to that address instead.

		u64 cached_free_region;

		// Recursive unmap helper for stacked aspaces.
		static u64 rec_unmap(AddrSpace *aspace, Region region,
		                     PTEFlags reqflags, VirtualArea *va);

		// If there are multiple virtual areas that cover the specified region,
		// split them at the region's boundaries.  The first varea in the region
		// (if any) is returned.  The aspace lock must be held.

		VirtualArea *split_varea(Region region);

		// Copy-on-write resolution and low-level mapping helpers used by
		// the fault path.
		void break_copy_on_write(VirtualArea *va, u64 vaddr, u64 phys);
		bool map(VirtualArea *va, u64 vaddr, PTEFlags reqflags);

	public:
		#include <servers/mem/addrspace/Mem/AddrSpace.h>

		ASpaceMappable mappable;  // this aspace viewed as a Mappable
		PageTable *page_table;

		// Create a process (hardware) aspace, or wrap an existing
		// page table.
		AddrSpace(bool process);
		AddrSpace(void *page_table);

		// Returns true if the fault was "good"; otherwise, the caller
		// should dump regs.  exec should only be used if the CPU
		// implements per-page exec protection; otherwise, treat it
		// as a read.

		bool handle_fault(ulong addr, bool write, bool exec, bool user);

		void get_mappable(IMappable *ma);
		void clone(IAddrSpace *addrspace, u8 clone_is_real);

		// Allocate anonymous memory and map it at *vstart.
		void alloc_and_map(u64 len, u64 *vstart,
		                   AllocFlags aflags, MapFlags mflags);

		// Mapping privilege levels; map_protected sets
		// PTEFlags::Protected so the user cannot unmap/alter it.
		enum {
			map_user,
			map_protected,
			map_kernel
		};

		// Map region of ma into this aspace at *vstart.
		void map(IMappable ma, Region region, u64 *vstart, MapFlags mflags,
		         bool from_kernel = false, int map_type = map_user);
		void unmap(Region region, bool from_kernel = false);

		// Query/update flags and backing of an existing mapped region.
		void set_mapflags(Region region, MapFlags mflags);
		void get_mapflags(Region region, MapFlags *mflags, uint8_t *all_same);
		void get_mapping(Region region, IMappable *ma, u64 *offset);

		void get_page_size(u32 *page_size);
		void get_min_align(u32 *min_align);

		// Size of the whole aspace, delegated to the page table.
		void get_size(u64 *size)
		{
			page_table->get_size(size);
		}

		friend void Arch::set_aspace(AddrSpace *aspace);
		friend class ASpaceMappable;
	};
+
+ extern Factory addr_space_factory, proc_addr_space_factory;
+
+ using ::System::RunTime::orbmm;
+
	// True iff addr is a multiple of the hardware page size.
	static inline bool page_aligned(u64 addr)
	{
		return !(addr & (u64)(Arch::page_size - 1));
	}

	// Round addr down to the start of its page.
	static inline u64 page_align(u64 addr)
	{
		return addr & ~(u64)(Arch::page_size - 1);
	}
+
+ // FIXME: Valid user addr? Paging holes?
+ static inline bool valid_addr(uint64_t addr)
+ {
+ if (sizeof(void *) == 8)
+ return true;
+
+ return (addr >> 32) == 0;
+ }
+};
+
+#endif
--- /dev/null
+#ifndef _KERN_NOTIFIER_H
+#define _KERN_NOTIFIER_H
+
+#include <System.h>
+#include <System/IO.h>
+
+using System::Notifiers::Notifier;
+
namespace IO {
	using System::IO::IONotifierInfo;
	typedef System::IO::IONotifierInfo_ns::Result io_result;

	// Complete an I/O operation: fill in an IONotifierInfo with the
	// transferred length and result code, then fire the notifier.
	static inline void io_notify(Notifier n, size_t len, io_result result)
	{
		// FIXME: ini needs to live longer.
		IONotifierInfo ini;
		ini.len = len;
		ini.result = result;
		n.notify(&ini);
	}
}
+
+#endif
--- /dev/null
+#ifndef _KERN_ORB_H
+#define _KERN_ORB_H
+
+#include <kern/types.h>
+#include <orb.h>
+
+#include <util/list.h>
+#include <System/Objects.h>
+
+namespace Mem {
+ class AddrSpace;
+};
+
+namespace Threads {
+ class Thread;
+};
+
namespace ORB {
	// Object/method identifier used in method invocations.
	typedef ulong ID;

	// One entry on a thread's method-invocation return stack.
	struct CallFrame {
		// Address Space and PC to return to
		Mem::AddrSpace *ret_aspace;
		ulong ret_pc;

		// Caller's PIB Pointer
		System::RunTime::ParamInfoBlock *caller_user_pib;

		// Object and Method that were called -- it probably isn't strictly
		// necessary to keep track of this here, but it'd help in doing a
		// "traceforward" of the method invocation stack in order to
		// debug a stalled method call.

		ID object, method;
	};

	// Header preceding a group of CallFrames (either embedded in the
	// Thread or at the start of an ORB stack page).
	struct CallStackHeader {
		Threads::Thread *thread;
		Util::List node;

		// Number of CallFrames under this header.
		// For a full page, this is:
		// (Arch::page_size - sizeof(CallStackHeader)) / sizeof(CallFrame)
		// There is also a much smaller group of call frames in the Thread
		// structure, so that an ORB stack page doesn't need to be allocated
		// in the common case of shallow method nesting.

		int num_frames;

		// Zero-length (flexible) array, GNU extension: the frames
		// immediately follow the header in memory.
		CallFrame frames[0];
	};
}
+
+#endif
--- /dev/null
+#ifndef _KERN_PAGEALLOC_H
+#define _KERN_PAGEALLOC_H
+
+#include <System/Mem.h>
+
+#include <kern/kernel.h>
+#include <kern/list.h>
+#include <kern/mem.h>
+
+#include <arch/paging.h>
+#include <arch/addrs.h>
+
+#include <lowlevel/atomic.h>
+#include <util/lock.h>
+
+namespace Mem {
+ class PageAllocZone;
+ class Page;
+
+ // Per-page flag word. The anonymous bitfield struct gives named
+ // one-bit access (e.g. pf.Dirty), bits::Dirty supplies the matching
+ // bit *index* for code that needs it, and pad fixes the union's
+ // size at 32 bits.
+ union PageFlags {
+ struct bits {
+ enum {
+ Dirty = 0
+ };
+ };
+
+ struct {
+ uint32_t Dirty:1;
+ };
+
+ uint32_t pad;
+ };
+
+ // This is an array of all pages in all zones. For now, zones must be
+ // contiguous (or at least close enough to each other that it's not
+ // a big deal to waste the space for the intervening page structs).
+
+ extern Page *pages, *last_page;
+
+ // Forward declaration; defined below once struct Page is complete.
+ static inline uintptr_t page_to_phys(Page *page);
+
+ // True iff page points into the global pages[] array, i.e. it
+ // describes real physical memory rather than being a stray pointer.
+ static inline bool is_phys_page(Page *page)
+ {
+ return page >= Mem::pages && page < last_page;
+ }
+
+ // Metadata for one physical page frame. Field layout is shared with
+ // the buddy-style allocator in PageAllocZone, so member order matters.
+ struct Page {
+ // A page is reserved if it is neither Free or InUse.
+ // Free and InUse may not be set at the same time.
+
+ static const u32 Free = 0x00000001;
+ static const u32 InUse = 0x00000002;
+
+ u32 flags;
+ PageAllocZone *zone;
+
+ union {
+ // These fields are valid when a page is free, if it is the
+ // first or last page of the chunk.
+
+ struct {
+ // For the first page of a chunk, this points to the last
+ // page of the chunk. For the last page of the chunk, this
+ // points to the first page of the chunk. For a single-page
+ // chunk, this points to itself. For a bin's list head, this
+ // points to the smallest bin at least as large as itself
+ // that has an available chunk.
+
+ Page *otherside;
+ } free;
+
+ // These fields are valid when a page is in use (not free or
+ // reserved).
+
+ struct {
+ int32_t refcount;
+ PageFlags flags;
+ } inuse;
+ };
+
+ // chunk when free, rmap when in use
+ //
+ // chunk points to the last page of the previous chunk in
+ // the bin and the first page of the next chunk in the bin.
+ // Only valid for the first page of a chunk. Prevchunk of the
+ // first chunk and nextchunk of the last chunk point to the
+ // bin's list head.
+ //
+ // rmap points to each mapping (as an RMapNode) of this page.
+ // NOTE: rmap list currently unused
+
+ Util::List chunk_rmap_list;
+
+ // Atomically take a reference. At assert level >= 1, traps
+ // (rather than asserting) on a page that isn't a live in-use
+ // physical page, after dumping diagnostics.
+ // NOTE(review): printf uses %lx for page_to_phys(), which
+ // returns uintptr_t -- fine where uintptr_t == ulong; confirm
+ // for other targets.
+ void retain()
+ {
+ if (_UTIL_ASSERT_LEVEL >= 1) {
+ if (!is_phys_page(this) || !(flags & InUse)) {
+ in_fault++;
+ printf("Page %p (phys %lx) retained flags %x, is_phys %d\n",
+ this, page_to_phys(this), flags,
+ (int)is_phys_page(this));
+ __builtin_trap();
+ }
+ }
+// assert(flags & InUse);
+ assert(inuse.refcount >= 1);
+ ll_atomic_inc(&inuse.refcount);
+ }
+
+ // Return this page to its zone's free lists (defined out of line).
+ void free_page();
+
+ // Atomically drop a reference; frees the page when the count
+ // reaches zero. Same debug trap as retain().
+ void release()
+ {
+ if (_UTIL_ASSERT_LEVEL >= 1) {
+ if (!is_phys_page(this) || !(flags & InUse)) {
+ in_fault++;
+ printf("Page %p (phys %lx) released flags %x, is_phys %d\n",
+ this, page_to_phys(this), flags,
+ (int)is_phys_page(this));
+ __builtin_trap();
+ }
+ }
+// assert(flags & InUse);
+ assert(inuse.refcount >= 1);
+ if (ll_atomic_dec_and_test(&inuse.refcount))
+ free_page();
+ }
+
+ // Read the current refcount of an in-use page (debug/assert use).
+ int32_t get_refcount()
+ {
+ assert(is_phys_page(this));
+ assert(flags & InUse);
+ assert(inuse.refcount >= 1);
+ return inuse.refcount;
+ }
+ };
+
+ // Physical address -> page struct (no range check; callers use
+ // is_phys_page() where the address may be outside managed RAM).
+ static inline Page *phys_to_page(uintptr_t phys)
+ {
+ return pages + (phys >> Arch::page_shift);
+ }
+
+ // Page struct -> physical address of the frame it describes.
+ static inline uintptr_t page_to_phys(Page *page)
+ {
+ return (page - pages) << Arch::page_shift;
+ }
+
+ // Kernel-virtual address -> page struct, via the arch's linear map.
+ static inline Page *kvirt_to_page(void *kvirt)
+ {
+ return phys_to_page(kvirt_to_phys(kvirt));
+ }
+
+ // Page struct -> kernel-virtual address of the frame.
+ static inline void *page_to_kvirt(Page *page)
+ {
+ return phys_to_kvirt(page_to_phys(page));
+ }
+
+ // One contiguous range of physical pages managed with power-of-two
+ // binned free chunks (buddy-style). All state is guarded by "lock".
+ class PageAllocZone {
+ Lock::SpinLock lock;
+
+ enum {
+ num_bins = CONF_MEM_MAX_PAGE_ALLOC_BITS
+ };
+
+ // List head for each bin
+ Page bins[num_bins];
+
+ // First and last pages of the zone
+ // NOTE(review): declared intptr_t although init() takes uintptr_t
+ // -- confirm intended; signed overflow is conceivable on targets
+ // with RAM in the top half of the address space.
+ intptr_t start, end;
+ size_t zonesize; // end - start + 1
+
+ // Number of pages in the free chunk beginning at "start".
+ size_t chunk_size(Page *start);
+
+ // Bin index <-> chunk size (pages) conversions; the alloc variant
+ // rounds up, the free variant rounds down.
+ uint bin_to_size(int bin);
+ int size_to_bin_alloc(size_t size);
+ int size_to_bin_free(size_t size);
+
+ Page *bin_to_head(int bin);
+
+ void remove_chunk(Page *start, int bin);
+ void add_to_bin(Page *chunk, int bin);
+ // Split a chunk down to num_pages, returning the allocation and
+ // re-binning the remainder.
+ Page *shrink_chunk(Page *start, int num_pages,
+ size_t chunk_size, int bin);
+
+ public:
+ // base and size are in pages; all pages must be reserved
+ // (i.e. flags set to zero).
+
+ void init(uintptr_t base, size_t size);
+
+ Page *alloc(uint num_pages);
+ void free(Page *head, size_t num_pages);
+ };
+
+ // Front end over the per-zone allocators: picks a zone from an
+ // architecture-supplied, preference-ordered zonelist.
+ class PageAlloc {
+ public:
+// #include "mem/pagealloc-server/Mem/PageAlloc.h"
+
+ // Architectures must define lists of zones, in preference order,
+ // to use for each type of allocation; these are indices into
+ // Arch::pagezonelists.
+ //
+ // The ISA DMA zonelist is for ISA DMA buffers, which must be below
+ // some platform-dependent limit (typically 16MiB).
+ //
+ // The DMA32 zonelist is for devices on a 32-bit bus (such as
+ // ordinary PCI), so that buffers over 4GiB are not used unless
+ // the platform provides a suitable mapping mechanism (such
+ // as an IOMMU).
+ //
+ // An architecture may define additional zonelists for its internal
+ // use, but it's best to pass them directly as pointers than to
+ // define additional numbers, so as to avoid conflict if the
+ // generic list expands in the future.
+
+ enum {
+ zonelist_normal = 0,
+ zonelist_dma32 = 1,
+ zonelist_isadma = 2,
+ };
+
+ // Note that while num_pages does not have to be a power of 2 (and
+ // the allocation size will not be rounded up to the next power of
+ // 2), a request that is not a power of two may fail despite the
+ // existence of a suitable chunk if there is no available chunk of
+ // the next-higher power of 2. num_pages may not be 0.
+
+ static Page *alloc(uint num_pages, PageAllocZone *const *zonelist);
+
+ static Page *alloc(uint num_pages, int zone = zonelist_normal)
+ {
+ // Bound the index by the named enum constants rather than a
+ // magic "2", so the check stays correct if the generic
+ // zonelist above ever grows.
+ assert(zone >= zonelist_normal && zone <= zonelist_isadma);
+
+ return alloc(num_pages, Arch::pagezonelists[zone]);
+ }
+
+ // Any span of allocated pages may be freed; it does not have to
+ // correspond to the size and start of the original allocation, and
+ // it may be larger than the maximum allocation size (though this
+ // is likely only useful during bootup when adding new chunks of
+ // memory). All pages must be in the same zone. num_pages may not
+ // be 0.
+
+ static void free(Page *head, size_t num_pages)
+ {
+ head->zone->free(head, num_pages);
+ }
+ };
+
+ extern PageAlloc page_alloc;
+
+
+ // Allocate num pages and return their kernel-virtual address.
+ // NOTE(review): the result of PageAlloc::alloc() is fed straight to
+ // page_to_kvirt(); if alloc() can return NULL on exhaustion, this
+ // produces a bogus pointer -- confirm alloc()'s failure contract.
+ static inline void *alloc_pages(int num,
+ int zone = PageAlloc::zonelist_normal)
+ {
+ return Mem::page_to_kvirt(PageAlloc::alloc(num, zone));
+ }
+
+ // Free pages previously obtained from alloc_pages; NULL is a no-op.
+ static inline void free_pages(void *addr, int num)
+ {
+ if (addr)
+ PageAlloc::free(kvirt_to_page(addr), num);
+ }
+
+ // Take a reference on the page at phys addr, but only if it lies in
+ // allocator-managed RAM (e.g. skips MMIO addresses).
+ static inline void retain_if_phys(ulong addr)
+ {
+ Page *page = phys_to_page(addr);
+
+ if (is_phys_page(page))
+ page->retain();
+ }
+
+ // Drop a reference on the page at phys addr, under the same
+ // managed-RAM guard as retain_if_phys.
+ static inline void release_if_phys(ulong addr)
+ {
+ Page *page = phys_to_page(addr);
+
+ if (is_phys_page(page))
+ page->release();
+ }
+}
+
+#endif
--- /dev/null
+// This is a generic pagetable implementation that most architectures
+// should be able to use as is, though architectures with weird paging
+// hardware can provide their own implementation. It corresponds to
+// mem/pagetable.cc.
+
+#ifndef _KERN_PAGETABLE_H
+#define _KERN_PAGETABLE_H
+
+#include <kern/mem.h>
+#include <util/lock.h>
+#include <arch/pagetable.h>
+
+namespace Mem {
+ // Generic radix-tree page table, parameterized on the PTE format.
+ // Recursion over directory levels is split into rec_* (inner
+ // levels, via DirPTE) and end_* (leaf level, via PTE).
+ template<typename PTE>
+ class PageTableImpl : public PageTable {
+ public:
+ typedef Mem::PTEFlags Flags;
+ typedef System::Mem::Region Region;
+ typedef System::Mem::RegionWithOffset RegionWithOffset;
+ typedef typename PTE::VirtAddr VirtAddr;
+ typedef typename PTE::DirPTE DirPTE;
+
+ private:
+ // The lock of any page table may nest in the lock of any
+ // aspace.
+
+ Lock::Lock lock;
+
+ // For non-process aspaces, the number of levels may be more or
+ // less than what the hardware provides (in particular, large file
+ // mappings on 32-bit targets will need more levels). For process
+ // aspaces, num_levels must equal PTE::num_levels. Levels for
+ // non-process address spaces can be added dynamically as needed.
+ // Non-proc aspaces may also use a different PTE format.
+
+ int num_levels;
+ int toplevel_shift, lastlevel_shift;
+
+ // Entries per leaf-level table.
+ static uint pages_per_table()
+ {
+ return 1 << PTE::shift_per_level;
+ }
+
+ // Entries per directory-level table.
+ static uint pages_per_dtable()
+ {
+ return 1 << DirPTE::shift_per_level;
+ }
+
+ void end_map(RegionWithOffset region, PTE flags, void *table);
+
+ void end_unmap(Region region, void *table);
+
+ void end_set_flags(Region region, PTE flags, PTE mask, void *table);
+
+ void rec_map(RegionWithOffset region, PTE flags,
+ void *table, int shift);
+
+ void rec_unmap(Region region, void *table, int shift);
+
+ void rec_set_flags(Region region, PTE flags,
+ PTE mask, void *table, int shift);
+
+ public:
+ // process == true builds a hardware-walkable table with exactly
+ // PTE::num_levels levels; the void* overload wraps an existing
+ // top-level table.
+ PageTableImpl(bool process);
+ PageTableImpl(void *table);
+
+ virtual ~PageTableImpl();
+
+ virtual void map(RegionWithOffset region, Flags flags);
+ virtual void unmap(Region region);
+ virtual void set_flags(Region region, Flags flags, Flags mask);
+ virtual void get_entry(u64 addr, u64 *phys, Flags *flags);
+
+ // Addressable size: hardware-limited for process tables, the
+ // full 64-bit page-granular space otherwise.
+ // NOTE(review): is_process is presumably a member of the
+ // PageTable base class (not visible here) -- confirm.
+ virtual void get_size(u64 *size)
+ {
+ if (is_process)
+ *size = 1ULL << (PTE::num_levels * PTE::shift_per_level);
+ else
+ *size = 1ULL << (64 - PTE::page_shift);
+ }
+ };
+}
+
+#endif
--- /dev/null
+#include <arch/paging.h>
--- /dev/null
+#ifndef _KERN_SPINLOCK_H
+#define _KERN_SPINLOCK_H
+
+#include <kern/kernel.h>
+#include <lowlevel/misc.h>
+
+extern int dying;
+
+namespace Lock {
+ // Uniprocessor spinlock: lock()/unlock() are no-ops except for
+ // deadlock bookkeeping under CONF_DEBUG_SPINLOCKS; the *_irq and
+ // *_recirq variants additionally manage the interrupt-enable state.
+ struct SpinLock {
+ #if CONF_DEBUG_SPINLOCKS
+ // Return address of the current holder's lock() call, 0 if free.
+ unsigned long held_at;
+ #endif
+
+ SpinLock()
+ {
+ init();
+ }
+
+ void init()
+ {
+ // Nothing on UP. Call arch on SMP.
+
+ #if CONF_DEBUG_SPINLOCKS
+ held_at = 0;
+ #endif
+ }
+
+ void lock()
+ {
+ // Nothing on UP. Call arch on SMP.
+
+ #if CONF_DEBUG_SPINLOCKS
+ #if !CONF_SMP
+ // On UP, re-acquiring a held lock can never succeed, so a
+ // non-zero held_at is a certain deadlock; report it unless
+ // we're already handling a fault.
+ if (!in_fault && held_at) {
+ in_fault++;
+ printf("Spinlock deadlock, lock %p previously held at 0x%lx\n",
+ this, held_at);
+ in_fault--;
+ BUG();
+ }
+ #endif
+
+ held_at = (ulong)__builtin_return_address(0);
+ #endif
+ }
+
+ void unlock()
+ {
+ #if CONF_DEBUG_SPINLOCKS
+ held_at = 0;
+ #endif
+
+ // Nothing on UP. Call arch on SMP.
+ }
+
+ // Acquire with interrupts unconditionally disabled.
+ void lock_irq()
+ {
+ ll_ints_off();
+ lock();
+ }
+
+ // Release and unconditionally re-enable interrupts.
+ void unlock_irq()
+ {
+ unlock();
+ ll_ints_on();
+ }
+
+ // Recursive-IRQ-safe acquire: returns the saved interrupt state,
+ // which must be passed back to unlock_recirq().
+ ulong lock_recirq()
+ {
+ unsigned long ret = ll_ints_save_and_off();
+ lock();
+ return ret;
+ }
+
+ void unlock_recirq(ulong savedirq)
+ {
+ unlock();
+ ll_ints_restore(savedirq);
+ }
+ };
+}
+
+#endif
--- /dev/null
+#ifndef _KERN_THREAD_H
+#define _KERN_THREAD_H
+
+#include <kern/types.h>
+#include <kern/time.h>
+#include <arch/thread.h>
+#include <util/list.h>
+#include <kern/irq.h>
+#include <kern/spinlock.h>
+#include <kern/orb.h>
+
+extern "C" void schedule();
+extern "C" void sched_new_thread();
+
+// FIXME: per-CPU
+extern int need_resched;
+
+namespace Mem {
+ class AddrSpace;
+}
+
+namespace Threads {
+ class Thread;
+ class WaitQueue;
+
+ // This is a reasonably simple O(1) scheduler that provides both
+ // real-time and timeshared scheduling, with (non-rt) priority boosts
+ // for interactive tasks.
+ //
+ // At some point, it'd be nice to extend/replace this with something
+ // that gives more options to how to schedule tasks. Priority
+ // inheritance would be nice, as would the ability to schedule groups
+ // of threads as one prior to scheduling within the group. The latter
+ // would help avoid giving more CPU time to certain apps simply
+ // because they divided their work among more threads (or to certain
+ // users simply because they're running more programs).
+ //
+ // At some sooner point, SMP support will need to be added.
+
+ class Sched {
+ public:
+ enum {
+ // The default timeslice of 10 ms applies to priority 8
+ // timeshared tasks.
+
+ default_timeslice = 10000000,
+ rt_prios = 256,
+ ts_static_prios = 16,
+
+ // This must not exceed 32 without increasing the size
+ // of ts_bitmap.
+
+ ts_prios = 32
+ };
+
+ private:
+ // One bit per real-time priority with a runnable thread.
+ // NOTE(review): the divisor is the *byte* size of ulong; a
+ // bitmap of rt_prios bits would be rt_prios / (sizeof(ulong)
+ // * 8). As written it over-allocates 8x (harmless) -- check
+ // the indexing in the scheduler implementation before
+ // shrinking this.
+ ulong bitmap[rt_prios / sizeof(ulong)];
+ Util::List rt_runqueue[rt_prios];
+
+ u32 ts_bitmap, ts_depleted_bitmap;
+ int last_replenish;
+
+ // Active and timeslice-exhausted queues per dynamic TS priority.
+ Util::List ts_runqueue[ts_prios];
+ Util::List ts_depleted[ts_prios];
+
+ Lock::SpinLock runqueue_lock;
+
+ void schedule_nolock();
+ Thread *best_rt(int prio);
+ Thread *best_ts();
+
+ void replenish_prio(int prio);
+ void replenish_all();
+
+ // Conversions between a TS priority and its timeslice length.
+ static u32 prio_to_slice(int prio);
+ static int slice_to_prio(u32 slice);
+
+ Time::KTimerEntry resched_timer;
+
+ public:
+ Util::List threadlist;
+
+ // FIXME: use sleeping lock once implemented
+ Lock::SpinLock threadlist_lock;
+
+ typedef void (*thread_func)(void *arg);
+
+ Thread *new_thread(thread_func func, void *arg, char *name = NULL);
+ void schedule();
+ void sched_new_thread();
+
+ void init();
+
+ friend class Thread;
+ friend class WaitQueue;
+ };
+
+ extern Sched sched;
+
+ // Base class for anything a thread can block on. "blocked" is
+ // cleared by unblock()/wake() implementations; "thread" is the
+ // thread to (eventually) wake.
+ struct Blocker {
+ Util::List list_node;
+ bool blocked;
+
+ // Besides the obvious use by ThreadBlocker, this is used in
+ // CascadeBlocker by WaitQueue both for prioritization of wakeups
+ // and for calling wake_nolock().
+
+ Thread *thread;
+
+ // NOTE(review): the default constructor leaves "thread"
+ // uninitialized; callers must set it before use.
+ Blocker()
+ {
+ blocked = true;
+ }
+
+ virtual ~Blocker()
+ {
+ }
+
+ virtual void wake() = 0;
+
+ // Like wake, but doesn't wake the thread -- used by WaitQueue
+ // which calls thread->wake_nolock itself.
+
+ virtual void unblock() = 0;
+ };
+
+ // This is a basic one-thread blocker; all blocked threads must
+ // use one of these.
+
+ struct ThreadBlocker : public Blocker {
+ // NOTE(review): leaves the inherited "thread" unset; use the
+ // other constructor or assign it explicitly.
+ ThreadBlocker()
+ {
+ }
+
+ ThreadBlocker(Thread *THREAD)
+ {
+ thread = THREAD;
+ }
+
+ void wake();
+ void unblock();
+ };
+
+ // A thread that needs to block on more than one blocker can create
+ // multiple CascadeBlockers all pointing to the same ThreadBlocker.
+ // The thread wakes when any of the cascades has been woken.
+
+ struct CascadeBlocker : public Blocker {
+ // NOTE(review): leaves "blocker" and "thread" unset.
+ CascadeBlocker()
+ {
+ }
+
+ CascadeBlocker(Blocker *BLOCKER)
+ {
+ blocker = BLOCKER;
+ thread = blocker->thread;
+ }
+
+ // The underlying blocker this cascade forwards wakeups to.
+ Blocker *blocker;
+ void wake();
+ void unblock();
+ };
+
+ // Waitqueues are queues of Blockers that can be woken up either one
+ // at a time in priority order or all at once, at the waker's option.
+ // Unlike Blockers, waitqueues do not have a blocked field; blocking
+ // threads must check for completion after adding themselves to the
+ // wait queue. When a blocker is woken, it is removed from the waitqueue.
+
+ class WaitQueue {
+ Util::List blockers;
+ Lock::SpinLock lock;
+
+ // Dequeue the best blocker; caller must hold "lock".
+ Blocker *unblock_one_nolock();
+
+ public:
+ void block(Blocker *blocker);
+ void unblock(Blocker *blocker);
+
+ bool empty();
+
+ // Returns true if a task was successfully woken
+ bool wake_one();
+
+ // Returns the number of tasks successfully woken
+ int wake_all();
+
+ // Like wake_one, but return the thread instead of waking it.
+ Blocker *unblock_one();
+ };
+
+ // Kernel thread control block. The arch-specific state must stay
+ // the first member (low-level context-switch code relies on it).
+ class Thread {
+ // This must be first.
+ Arch::ArchThread arch;
+
+ // If the thread is currently running, this contains the time of
+ // the context switch when the thread most recently started
+ // running.
+
+ Time::Time last_time;
+
+ // This contains the time in ns that the thread has remaining in
+ // its current timeslice.
+
+ s32 time_left;
+
+ // The current timeslice length in ns. For timeshared tasks, this
+ // is simply the priority scaled linearly.
+
+ u32 time_slice;
+
+ // Real-time and timeshared priorities
+ //
+ // Real-time priorities go from 0 to 255, with 0 indicating a
+ // timeshared task.
+ //
+ // Static timeshared priorities go from 1 to 16; they determine the
+ // timeslice of a task (not including interactivity bonuses). The
+ // default is 8. The dynamic timeshared priority is the priority
+ // associated with time_left as of the last global timeslice
+ // replenishment or wake-up, and ranges from 1 to 31. If the
+ // static priority is x, the dynamic priority is within the range
+ // [x,2x-1].
+
+ int rt_prio, ts_static_prio, ts_prio, last_replenish;
+
+ enum Policy {
+ TimeShared,
+ FIFO,
+ RoundRobin
+ } policy;
+
+ ThreadBlocker *blocked_on;
+ Util::List runqueue_node;
+
+ // Timeshared-queue management helpers (see Sched).
+ void ts_add();
+ void ts_del();
+ void ts_deplete();
+
+ void add();
+ void del();
+ void replenish();
+ // Charge elapsed CPU time against time_left as of "now".
+ void charge(Time::Time &now);
+ void prio_adjust();
+ void wake_nolock();
+
+ public:
+ Util::List threadlist_node;
+ Mem::AddrSpace *addr_space, *active_addr_space;
+
+ enum {
+ name_len = 32
+ };
+
+ char name[name_len];
+
+ enum {
+ internal_orbstack_frames = 8
+ };
+
+ // Built-in first block of ORB call frames, so shallow method
+ // nesting needs no page allocation (see ORB::CallStackHeader).
+ ORB::CallStackHeader orbstack;
+ ORB::CallFrame orbstackframes[internal_orbstack_frames];
+
+ // The header and frame of the method invocation at the top of
+ // the stack. FIXME: add a lock around these if other threads
+ // can read these to do a traceback/traceforward.
+
+ ORB::CallStackHeader *orbstack_top_hdr;
+ int orbstack_top; // Index into orbstack_top_hdr->frames[]
+
+ ~Thread();
+ void exit();
+ void block(ThreadBlocker *blocker);
+ void wake();
+
+ friend class Sched;
+ friend class WaitQueue;
+
+ friend void Arch::switch_thread(Thread *dest, Thread *src);
+
+ // FIXME: temp hack; use a Process later
+
+ void set_aspace(Mem::AddrSpace *aspace);
+ };
+
+ enum {
+ // sizeof(Thread) rounded up to an 8-byte multiple. The
+ // previous expression, "sizeof(Thread) + 7 / 8", evaluated
+ // 7 / 8 == 0 due to operator precedence, so no rounding
+ // happened at all.
+ thread_size = (sizeof(Thread) + 7) / 8 * 8
+ };
+}
+
+#include <arch/current.h>
+#define curthread (::Arch::get_current_thread())
+
+#endif
--- /dev/null
+#ifndef _KERN_TIME_H
+#define _KERN_TIME_H
+
+#include <kernel/time.h>
+#include <util/spinlock.h>
+
+namespace Time {
+ // The clock timer ticks 20 times per second. This timer is only used
+ // for maintaining the wall clock time; user timers, rescheduling, and
+ // other purposes are dealt with separately, so this timer only needs
+ // to run often enough to avoid overrun problems.
+
+ static const int clock_tick_rate = 20;
+
+ static const Time clock_tick_increment = {
+ 0, 1000000000 / clock_tick_rate
+ };
+
+ // KTimerEntry overrides the standard TimerEntry in order
+ // to bypass the asynchronous Notifier, so that low-level
+ // things like rescheduling timers can work.
+
+ struct KTimerEntry : public TimerEntry {
+ public:
+ // NOTE(review): leaves func/data unset; callers must assign
+ // func before the timer can fire.
+ KTimerEntry()
+ {
+ }
+
+ KTimerEntry(TimerMux *MUX) : TimerEntry(MUX)
+ {
+ }
+
+ // Callback invoked directly (synchronously) on expiry, with
+ // this entry as its argument; "data" is for the callback's use.
+ typedef void (*functype)(KTimerEntry *kte);
+ functype func;
+ void *data;
+
+ // TimerEntry expiry hook: bypasses the Notifier machinery and
+ // calls func directly.
+ void action()
+ {
+ func(this);
+ }
+ };
+
+ // This clock does not change when the wall clock is changed, but
+ // rather increases monotonically. Most timers should use this
+ // rather than wall time.
+
+ class MonotonicClock {
+ s64 llclocks_per_second;
+ s64 llclocks_per_tick;
+
+ // The clock's value as of the last tick.
+
+ Time last_tick_time;
+ s64 last_tick_llclock;
+
+ Lock::SpinLock lock;
+
+ // Convert a low-level clock delta to nanoseconds. The multiply
+ // is done in 64 bits; the result is truncated to s32, so the
+ // delta must be less than ~2 seconds' worth of llclocks
+ // (in practice, less than one tick).
+ s32 get_nanos(s32 llclocks)
+ {
+ return llclocks * 1000000000LL / llclocks_per_second;
+ }
+
+ KTimerEntry tick_timer;
+
+ static void timer_tick(KTimerEntry *entry);
+
+ public:
+ #include <servers/core/time/Time/MonotonicClock.h>
+
+ MonotonicClock()
+ {
+ init_iface();
+ }
+
+ virtual ~MonotonicClock()
+ {
+ }
+
+ void get_time(Time *time);
+ void get_resolution(Time *res);
+
+ void new_timer(Timer *timer);
+
+ // Record the measured low-level clock rate and derive the
+ // per-tick increment from it.
+ void calibrate(u64 llclocks_per_second_init)
+ {
+ llclocks_per_second = llclocks_per_second_init;
+ llclocks_per_tick = llclocks_per_second / clock_tick_rate;
+
+ printf("MonotonicClock::calibrate: %lld llclocks per second, %lld per tick\n",
+ llclocks_per_second, llclocks_per_tick);
+ }
+
+ // This is called every clock tick to update last_tick_time
+ // and last_tick_llclock.
+
+ void tick();
+
+ void init_tick_timer();
+ };
+
+ extern MonotonicClock monotonic_clock;
+
+ // Abstract one-shot hardware timer: tracks the currently programmed
+ // expiry and whether the timer is armed.
+ class HWTimer {
+ protected:
+ Time expiry;
+ bool armed;
+
+ public:
+ #include <servers/io/console/time/Time/HWTimer.h>
+
+ HWTimer()
+ {
+ init_iface();
+ expiry.seconds = 0;
+ expiry.nanos = 0;
+ }
+
+ virtual ~HWTimer()
+ {
+ }
+
+ // If a hardware timer is slow to reprogram, it may ignore
+ // arm() if time is later than the currently programmed time.
+ // NOTE(review): a previous comment said "Returns true if the
+ // timer action should be run immediately", but arm() returns
+ // void -- confirm the intended contract.
+
+ virtual void arm(Time new_expiry) = 0;
+ virtual void disarm() = 0;
+ virtual void calibrate() = 0;
+
+ void get_expiry(Time *expiry, uint8_t *armed);
+
+ // Interface stub: kernel HW timers dispatch through KTimerEntry,
+ // not user-visible events, so this must never be called.
+ void set_action(System::Events::Event *event)
+ {
+ BUG();
+ }
+ };
+
+ extern HWTimer *hw_timer;
+ extern TimerMux *monotonic_timers;
+ extern KTimerEntry *tick_timer;
+ void init();
+}
+
+#endif
--- /dev/null
+#ifndef _KERN_TYPES_H
+#define _KERN_TYPES_H
+
+#include <lowlevel/types.h>
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+
+typedef u8 octet;
+
+typedef unsigned char uchar;
+typedef unsigned short ushort;
+typedef unsigned int uint;
+typedef unsigned long ulong;
+
+// True iff a 64-bit length fits in this target's size_t.
+static inline bool valid_size_t(u64 size)
+{
+ // Local declaration only -- defined nowhere. If sizeof(size_t) is
+ // neither 8 nor 4, the call below survives constant folding and
+ // the build fails at link time, catching unsupported targets.
+ void unsupported_size_t();
+
+ if (sizeof(size_t) == 8)
+ return true;
+
+ if (sizeof(size_t) != 4)
+ unsupported_size_t();
+
+ return (size >> 32) == 0;
+}
+
+#endif
--- /dev/null
+#include <kern/types.h>
+
+#define _PTRDIFF_T
+#define _SIZE_T
+
--- /dev/null
+#include <kern/libc.h>
+
--- /dev/null
+# Pull in the per-subsystem build fragments for the io/ servers.
+include io/console/Makefile
+include io/irq/Makefile
+include io/timer/Makefile
--- /dev/null
+# io/console build fragment: registers this directory and its C++
+# sources (misc.cc, vga.cc) with the top-level DIRS/CXXFILES lists.
+DIR := io/console/
+DIRS += $(DIR)
+
+RAW_CXXFILES := misc vga
+CXXFILES += $(RAW_CXXFILES:%=$(DIR)%)
--- /dev/null
+The kernel console is a low-level implementation that only provides
+the minimal features necessary and/or useful for debugging the kernel
+and low-level user software. Currently, only the System.IO.OStream
+interface is required to be implemented. Video-based consoles will
+likely also have an interface that allows one to request a video mode
+change to display a catastrophic error message, but that can wait
+until there are user space programs that change the video mode.
--- /dev/null
+// io/console/misc.cc -- Generic code relating to kernel debug consoles
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/console.h>
+#include <kern/libc.h>
+#include <util/spinlock.h>
+
+namespace IO {
+namespace Console {
+ // Narrow convenience wrapper over the Array<octet>/u64 interface
+ // method; the (possibly updated) length out-parameter is discarded.
+ void Console::write(char *data, size_t len)
+ {
+ u64 len64 = len;
+ write(Array<octet>((octet *)data, len), &len64);
+ }
+
+ // All consoles will set this to themselves; the last console
+ // to be initialized will be used. At some point, the console
+ // used will be configurable (including the ability to broadcast
+ // to multiple consoles).
+
+ Console *primary_console;
+}
+}
+
+static const size_t printf_buffer_size = 4096;
+static char printf_buffer[printf_buffer_size];
+Lock::SpinLock printf_lock;
+
+// Note: this will not retry with a larger buffer if overflow
+// happens. If you really need that, you'll need to call snprintf
+// and primary_console->write yourself.
+
+void vga_write(uint8_t *buf, u64 len);
+
+// Formatted output to the primary console, serialized by printf_lock.
+// Returns the number of characters actually written (truncated to the
+// static buffer -- see the note above about overflow).
+size_t printf(const char *str, ...)
+{
+ Lock::AutoSpinLockRecIRQ autolock(printf_lock);
+
+ va_list args;
+ va_start(args, str);
+ size_t ret = vsnprintf(printf_buffer, printf_buffer_size, str, args);
+ va_end(args);
+
+ // vsnprintf returns the length the output *would* have had, not
+ // counting the NUL terminator; on truncation the buffer holds only
+ // printf_buffer_size - 1 characters. The old test
+ // (ret > printf_buffer_size, clamp to printf_buffer_size) could
+ // pass the NUL slot to write() as data.
+ if (ret >= printf_buffer_size)
+ ret = printf_buffer_size - 1;
+
+ IO::Console::primary_console->write(printf_buffer, ret);
+ return ret;
+}
--- /dev/null
+// io/console/vga.cc -- Kernel debugging console for standard VGA text mode
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <System/IO.h>
+#include <kern/kernel.h>
+#include <kern/console.h>
+#include <kern/notifier.h>
+#include <kern/io.h>
+#include <kern/libc.h>
+#include <kern/addrs.h>
+#include <util/lock.h>
+
+void vga_write(char *str);
+
+namespace IO {
+namespace Console {
+ // Text-mode VGA console: writes 16-bit char/attr cells into the
+ // legacy frame buffer at phys 0xb8000 and tracks the hardware
+ // cursor via the CRTC registers.
+ class VGA : public Console {
+ Lock::SpinLock lock;
+
+ u16 *mem_base;
+ // NOTE(review): stored but never used; get_cursor()/set_cursor()
+ // hard-code ports 0x3d4/0x3d5 instead -- unify before supporting
+ // non-standard I/O bases.
+ unsigned long io_base;
+
+ int line_width;
+ int lines;
+ int pos; // cursor position as a cell index (row * line_width + col)
+
+ // Emit one character at the cursor with the given attribute,
+ // handling '\n'/'\r' and scrolling; caller must hold "lock".
+ void putc(int ch, int attr)
+ {
+ if (ch == '\n') {
+ pos += line_width;
+ pos -= pos % line_width;
+ } else if (ch == '\r') {
+ pos -= pos % line_width;
+ } else {
+ // Use ll_swap_le16 rather than store_le16 in the hopes
+ // that the compiler can simply change the shifts around
+ // rather than assembling it one way and then ll_swapping.
+ //
+ // It's done as one atomic store rather than two single
+ // byte writes (which would eliminate endianness concerns)
+ // to eliminate the possibility of a race where the video
+ // card reads a character before its attribute has been
+ // written.
+
+ mem_base[pos++] = ll_swap_le16(ch | (attr << 8));
+ }
+
+ if (pos >= lines * line_width) {
+ pos -= line_width;
+
+ // FIXME: until memcpy is more than a stupid byte-at-a-time
+ // loop, this could race with the video card.
+
+ memcpy(mem_base, mem_base + line_width,
+ line_width * (lines - 1) * sizeof(u16));
+
+ for (int i = 0; i < line_width; i++)
+ mem_base[pos + i] = ll_swap_le16(' ' | (7 << 8));
+ }
+ }
+
+ // Read the hardware cursor position from CRTC registers 14/15
+ // (high/low byte) into "pos".
+ void get_cursor()
+ {
+ ll_out_8(0x3d4, 14);
+ pos = (u16)ll_in_8(0x3d5) << 8;
+ ll_out_8(0x3d4, 15);
+ pos |= ll_in_8(0x3d5);
+ }
+
+ // Write "pos" back to the CRTC cursor registers.
+ void set_cursor()
+ {
+ ll_out_8(0x3d4, 14);
+ ll_out_8(0x3d5, pos >> 8);
+ ll_out_8(0x3d4, 15);
+ ll_out_8(0x3d5, pos & 0xff);
+ }
+
+ public:
+ #include <servers/io/console/vga/IO/Console/VGA.h>
+
+ VGA()
+ {
+ init_iface();
+
+ // FIXME: should be PCI mem space, not phys mem.
+ mem_base = (u16 *)Arch::phys_to_kvirt(0xb8000);
+ io_base = 0x3d0;
+
+ // FIXME: detect
+ line_width = 80;
+ lines = 25;
+
+ primary_console = this;
+ }
+
+ // OStream interface: write *LEN bytes from buf to the screen.
+ // Rejects lengths that don't fit in size_t or exceed buf.count.
+ void write(Array<octet> buf, u64 *LEN)
+ {
+ if (!valid_size_t(*LEN))
+ /* throw something */
+ return;
+
+ size_t len = *LEN;
+
+ if (len > buf.count)
+ /* throw something */
+ return;
+
+ Lock::AutoSpinLockRecIRQ autolock(lock);
+ get_cursor();
+
+ for (size_t i = 0; i < len; i++)
+ putc(buf.ptr[i], 7);
+
+ set_cursor();
+ }
+
+ // "Async" write: this device is synchronous, so the write
+ // completes immediately and the notifier fires inline.
+ void write_async(Array<octet> buf, u64 len, Notifier notifier)
+ {
+ write(buf, &len);
+ io_notify(notifier, len, io_result::Success);
+ }
+ };
+
+ // Use a static constructor, so that it can be used before
+ // memory management is up and running.
+
+ VGA vga;
+}
+}
+
+#include <servers/io/console/vga/footer.cc>
--- /dev/null
+class IO.Console.VGA : System.IO.OStream;
--- /dev/null
+# io/irq build fragment: registers this directory and its C++ source
+# (i8259.cc) with the top-level DIRS/CXXFILES lists.
+DIR := io/irq/
+DIRS += $(DIR)
+
+RAW_CXXFILES := i8259
+CXXFILES += $(RAW_CXXFILES:%=$(DIR)%)
--- /dev/null
+// io/irq/i8259.cc -- Intel 8259-compatible IRQ support
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/irq.h>
+#include <kern/kernel.h>
+#include <kern/io.h>
+#include <kern/i8259.h>
+
+namespace IRQ {
+ using namespace IRQ;
+
+ // This assumes a second i8259 cascaded on IRQ2 of the first i8259,
+ // with I/O port bases of 0x20 and 0xa0, as every i8259 I've
+ // encountered has been configured that way, even on non-x86
+ // platforms. This can be made more general later if need be.
+ //
+ // FIXME: For non-x86 platforms, the I/O window base must be added in.
+
+ inline void I8259::output_mask(int off)
+ {
+ ll_out_8(off ? 0xa1 : 0x21, cached_masks[0]);
+ }
+
+ inline void I8259::mask(u32 irq)
+ {
+ assert(irq < 16);
+
+ int off = irq / 8;
+ int bit = irq % 8;
+
+ cached_masks[off] |= (1 << bit);
+ output_mask(off);
+ }
+
+ void I8259::unmask(u32 irq)
+ {
+ assert(irq < 16);
+
+ int off = irq / 8;
+ int bit = irq % 8;
+
+ cached_masks[off] &= ~(1 << bit);
+ output_mask(off);
+ }
+
	// Mask the given IRQ (0-15) and acknowledge it with a specific EOI,
	// so the line cannot re-fire until it is unmasked again.
	void I8259::mask_and_ack(u32 irq)
	{
		assert(irq < 16);
		int bit = irq % 8;	// line number within its chip

		mask(irq);

		if (irq < 8) {
			// Specific EOI (0x60 | line) to the master.
			ll_out_8(0x20, 0x60 | bit);
		} else {
			// Specific EOI to the slave, then a specific EOI for the
			// cascade line (IRQ2, hence 0x62) on the master.
			ll_out_8(0xa0, 0x60 | bit);
			ll_out_8(0x20, 0x62);
		}
	}
+
	// Query the i8259 pair for a pending IRQ.  Returns the IRQ number
	// (0-15), or -1 if nothing is pending.  On x86/x64 the vector is
	// delivered with the interrupt itself, so this must never be called
	// there (hence the assert).
	int I8259::get_pending_irq()
	{
	#if defined(_LL_ARCH_x86) || defined(_LL_ARCH_x64)
		assert(0);
		return -1;
	#else
		// NOTE(review): 0x0b is the OCW3 "read in-service register"
		// command; the OCW3 poll command is 0x0c.  Verify that testing
		// bit 7 of an ISR read is really what is intended here.
		ll_out_8(0x20, 0x0b);
		u8 ret = ll_in_8(0x20);

		if (!(ret & 0x80))
			return -1;

		ret &= 7;

		// Line 2 of the master is the cascade input; anything else is a
		// real master IRQ and can be returned directly.
		if (ret != 2)
			return ret;

		// Cascade: repeat the query on the slave chip.
		ll_out_8(0xa0, 0x0b);
		ret = ll_in_8(0xa0);

		// Shouldn't happen, as even if the device de-asserts the
		// cascaded interrupt, it should wait for EOI.

		if (!(ret & 0x80))
			return -1;

		return (ret & 7) | 8;
	#endif
	}
+
	void I8259::init()
	{
		// Initialize each 8259 for edge triggered, cascade, 8086 mode.
		// Interrupt vectors are set to 0x20-0x2f (this only matters on
		// x86/x64).  The four writes per chip are the standard ICW1-ICW4
		// initialization sequence and must happen in this order.

		ll_out_8(0x20, 0x11);	// ICW1: edge, cascade, ICW4 needed
		ll_out_8(0x21, 0x20);	// ICW2: master vector base 0x20
		ll_out_8(0x21, 0x04);	// ICW3: slave attached on line 2
		ll_out_8(0x21, 0x01);	// ICW4: 8086 mode

		ll_out_8(0xa0, 0x11);	// ICW1 for the slave
		ll_out_8(0xa1, 0x28);	// ICW2: slave vector base 0x28
		ll_out_8(0xa1, 0x02);	// ICW3: slave cascade identity (2)
		ll_out_8(0xa1, 0x01);	// ICW4: 8086 mode

		// Mask all IRQs initially.

		cached_masks[0] = cached_masks[1] = 0xff;
		ll_out_8(0x21, 0xff);
		ll_out_8(0xa1, 0xff);

		// Publish the 16 IRQ slots to the generic IRQ layer, with this
		// controller as their owner.
		bzero(irqslots, sizeof(irqslots));

		for (int i = 0; i < 16; i++)
			irqslots[i].controller = this;

		irqs = irqslots;
		num_irqs = 16;
	}
+
+ I8259 i8259;
+}
--- /dev/null
# Build fragment for io/timer/: register this directory and its C++
# sources (the i8254 timer driver) with the top-level build lists.
DIR := io/timer/
DIRS += $(DIR)

RAW_CXXFILES := i8254
CXXFILES += $(RAW_CXXFILES:%=$(DIR)%)
--- /dev/null
+// io/timer/i8254.cc -- Intel 8254 and compatible timer driver
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/time.h>
+#include <kern/io.h>
+#include <lowlevel/clock.h>
+
+namespace IO {
+namespace Timer {
+ using Time::HWTimer;
+ using Time::monotonic_clock;
+ using Time::hw_timer;
+ using Time::Time;
+
+ class I8254 : public HWTimer {
+ Lock::SpinLock lock;
+ bool armed;
+
+ u16 read_time()
+ {
+ ll_out_8(0x43, 0);
+ u16 ret = ll_in_8(0x40);
+ ret |= (u16)ll_in_8(0x40) << 8;
+ return ret;
+ }
+
+ void wait_for_zero()
+ {
+ u16 val;
+
+ do {
+ val = read_time();
+ } while (val < 0x8000);
+
+ do {
+ val = read_time();
+ } while (val > 0x8000);
+ }
+
+ enum {
+ clock_freq = 1193182
+ };
+
+ public:
+ I8254()
+ {
+ hw_timer = this;
+ armed = false;
+ }
+
+ void calibrate()
+ {
+ wait_for_zero();
+ s64 start_tick = ll_getclock();
+ wait_for_zero();
+ wait_for_zero();
+ s64 end_tick = ll_getclock();
+
+ s64 ticks_per_second = (end_tick - start_tick) *
+ (u64)clock_freq / 0x20000;
+
+ monotonic_clock.calibrate(ticks_per_second);
+ }
+
+ void arm(Time new_expiry)
+ {
+ // Interrupts should always be disabled when
+ // this is called.
+
+ Lock::AutoSpinLock autolock(lock);
+
+ // The 8254 is often slow to program, so don't re-program if the
+ // expiry is the same. To accomodate this, the tick timer must
+ // be set to go off often enough that the expiry is never more
+ // than 0xffff 8254 ticks (about 1/18.2 sec) in the future.
+
+ if (armed && expiry != new_expiry) {
+ Time now;
+ monotonic_clock.get_time(&now);
+ Time rel_expiry = new_expiry - now;
+ expiry = new_expiry;
+
+ u32 ticks;
+
+ assert(rel_expiry.seconds <= 0);
+
+ if (rel_expiry.seconds < 0)
+ ticks = 1;
+ else {
+ // Add one extra tick to make sure we round up rather than
+ // down; otherwise, we'll just end up programming the timer
+ // for one tick and trying again.
+
+ ticks = rel_expiry.nanos * (u64)clock_freq /
+ 1000000000ULL + 1;
+
+ assert(ticks <= 0xffff);
+
+ if (ticks == 0)
+ ticks = 1;
+ }
+
+ if (!armed)
+ ll_out_8(0x43, 0x30);
+
+ ll_out_8(0x40, ticks & 0xff);
+ ll_out_8(0x40, ticks >> 8);
+ armed = true;
+ }
+ }
+
+ void disarm()
+ {
+ armed = false;
+ }
+ };
+
+ I8254 i8254;
+}
+}
--- /dev/null
# Build fragment for lib/: register this directory's C++ sources with
# the top-level build lists, then pull in the lib/kernel sub-fragment.
DIR := lib/
DIRS += $(DIR)

RAW_CXXFILES := orb libc ctors
CXXFILES += $(RAW_CXXFILES:%=$(DIR)%)

include lib/kernel/Makefile.kernel
--- /dev/null
+// lib/ctors.cc -- Run C++ global constructors
+//
+// Destructors are not run at shutdown, as we won't be relying on global
+// destructors to do any cleanup.  Constructors are needed even if we don't
+// explicitly use them, however, as GCC sometimes uses them implicitly
+// to initialize global data.
+
+#include <kern/libc.h>
+#include <kern/mem.h>
+#include <lowlevel/barriers.h>
+
// Invoke every entry in the global constructor table.
//
// "ctors" is a linker-provided symbol marking the start of the table;
// the loop assumes the table is terminated by a NULL entry.
// NOTE(review): confirm both assumptions against the linker script.
void run_ctors()
{
	typedef void (*ctor)();
	extern ctor ctors;

	for (ctor *ptr = &ctors; *ptr; ptr++)
		(*ptr)();
}
+
+// Global constructors call this to register global destructors
+// with some versions and/or configurations of GCC. I'm not sure
+// why just using a static dtor table isn't good enough.
+
// No-op destructor registration: the kernel never runs global
// destructors, so the (func, arg, dso) tuple is discarded.  Returns 0
// (success) as the C++ ABI requires.
extern "C" int __cxa_atexit(void (*func)(void *), void *arg, void *d)
{
	return 0;
}
+
+// More crap we don't care about (we don't use global destructors),
+// but GCC requires.
+
+int __dso_handle;
--- /dev/null
+../../lib/kernel
\ No newline at end of file
--- /dev/null
+// lib/libc.cc -- Standard C-library functions
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+
+#include <kern/types.h>
+#include <kern/libc.h>
+
+#include <stdarg.h>
+#include <limits.h>
+
+// FIXME: Move printf to lib/kernel
+
+static const int alt_form = 0x0001;
+static const int zero_pad = 0x0002;
+static const int neg_field = 0x0004;
+static const int leave_blank = 0x0008;
+static const int always_sign = 0x0010;
+static const int group_thousands = 0x0020; // FIXME -- unimplemented
+static const int long_arg = 0x0040;
+static const int long_long_arg = 0x0080;
+static const int short_arg = 0x0100;
+static const int short_short_arg = 0x0200;
+static const int intmax_arg = 0x0400;
+static const int ptrdiff_arg = 0x0800;
+static const int size_t_arg = 0x1000;
+static const int capital_hex = 0x2000;
+static const int num_signed = 0x4000;
+static const int has_precision = 0x8000;
+
// Append len bytes from src to the output buffer, truncating at limit;
// opos always advances by the full (untruncated) length so vsnprintf
// can return the would-be output size.
//
// BUG FIX: src is now const-qualified -- printf_num and vsnprintf pass
// string literals (e.g. "0x", "(null)"), which may not bind to a plain
// char * in standard C++.
static void printf_string(char *buf, size_t &opos, size_t limit,
                          const char *src, size_t len)
{
	if (opos < limit) {
		size_t olen = opos + len <= limit ? len : limit - opos;
		memcpy(buf + opos, src, olen);
	}

	opos += len;
}
+
// Append len copies of ch to the output buffer, truncating at limit;
// opos always advances by the full len so the caller can report the
// untruncated output length.
static void printf_fill(char *buf, size_t &opos, size_t limit,
                        char ch, int len)
{
	if (opos < limit) {
		size_t room = limit - opos;
		size_t olen = (size_t)len <= room ? (size_t)len : room;
		memset(buf + opos, ch, olen);
	}

	opos += len;
}
+
// Format the integer "value" in the given radix into obuf, honoring
// field width, precision, and the flag bits defined above.  opos/limit
// behave as in printf_string: output is truncated at limit, but opos
// advances by the full formatted length.
static void printf_num(char *obuf, size_t &opos, size_t limit,
                       s64 value, long radix, int fieldwidth,
                       int precision, int flags)
{
	char buf[65];
	int pos = 64;	// digits are generated from buf[64] downwards
	int letter = (flags & capital_hex) ? 'A' - 10 : 'a' - 10;
	u64 uval;

	// Work on the magnitude; the sign is emitted separately below.
	// NOTE(review): -value overflows for the most negative s64.
	if (flags & num_signed)
		uval = value < 0 ? -value : value;
	else
		uval = value;

	// An explicit precision of 0 suppresses all output if the value
	// is zero.  Otherwise, the output size is not limited by precision
	// or field width.

	if (uval != 0 || !(flags & has_precision) || precision != 0) do {
		int ch = uval % radix;

		if (ch < 10)
			buf[pos] = ch + '0';
		else
			buf[pos] = ch + letter;

		uval /= radix;
		pos--;
	} while (uval);

	int len = 64 - pos;	// number of digits generated

	// length which counts against fieldwidth but not precision
	int extralen = 0;

	// Sign character (or its placeholder) for signed conversions.
	if (flags & num_signed) {
		if (value < 0) {
			printf_fill(obuf, opos, limit, '-', 1);
			extralen += 1;
		} else if (flags & always_sign) {
			printf_fill(obuf, opos, limit, '+', 1);
			extralen += 1;
		} else if (flags & leave_blank) {
			printf_fill(obuf, opos, limit, ' ', 1);
			extralen += 1;
		}
	}

	// Alternate form: leading 0 for octal (implemented by bumping the
	// precision) and a 0x prefix for hex.
	if ((flags & alt_form) && value != 0) {
		if (radix == 8 && (!(flags & has_precision) || precision <= len)) {
			flags |= has_precision;
			precision = len + 1;
		}

		if (radix == 16) {
			printf_string(obuf, opos, limit, "0x", 2);
			extralen += 2;
		}
	}

	// Turn any remaining precision into a count of leading zeroes.
	if ((flags & has_precision) && len < precision) {
		precision -= len;
		len += precision;
	} else {
		precision = 0;
	}

	len += extralen;

	// Left padding (spaces or zeroes) for right-justified fields.
	if (!(flags & neg_field) && len < fieldwidth) {
		char padchar = (flags & zero_pad) ? '0' : ' ';
		printf_fill(obuf, opos, limit, padchar, fieldwidth - len);
		len = fieldwidth;
	}

	// Leading zeroes from the precision.
	if (precision != 0) {
		printf_fill(obuf, opos, limit, '0', precision);
		len += precision;
	}

	// The digits themselves: buf[pos + 1] .. buf[64].
	printf_string(obuf, opos, limit, buf + pos + 1, 64 - pos);

	// Right padding for left-justified fields.
	if ((flags & neg_field) && len < fieldwidth)
		printf_fill(obuf, opos, limit, ' ', fieldwidth - len);
}
+
// Minimal printf-style formatter.  Writes at most "size" bytes to buf
// and returns the length the full output would have had (like C99
// vsnprintf).
//
// NOTE(review): the output is only NUL-terminated when it is truncated
// (see the end of the function); when the output fits, no terminator
// is written and callers must rely on the returned length.  Verify
// that this is intentional.
size_t vsnprintf(char *buf, size_t size, const char *str, va_list args)
{
	size_t opos = 0; // position in the output string
	unsigned int flags = 0;
	int radix = 10;
	int state = 0;		// 0 = literal text, 1 = inside a % conversion
	int fieldwidth = 0;
	int precision = 0;

	for (size_t pos = 0; str[pos]; pos++) switch (state) {
		case 0:
			// Literal text; '%' switches to conversion parsing.
			if (str[pos] == '%') {
				flags = 0;
				radix = 10;
				state = 1;
				fieldwidth = 0;
				precision = 0;
				break;
			}

			if (opos < size)
				buf[opos] = str[pos];

			opos++;
			break;

		case 1: // A percent has been seen; read in format characters
			switch (str[pos]) {
				case '#':
					flags |= alt_form;
					break;

				case '0':
					// A leading 0 is the zero-pad flag; after a '.'
					// it is part of the precision instead.
					if (!(flags & has_precision)) {
						flags |= zero_pad;
						break;
					}

					// else fall through

				case '1' ... '9':
					// Field width (digits after '.' are handled in the
					// '.' case below).
					if (flags & has_precision)
						goto default_case;

					do {
						fieldwidth *= 10;
						fieldwidth += str[pos++] - '0';
					} while (str[pos] >= '0' && str[pos] <= '9');

					pos--;
					break;

				case '*':
					// Field width supplied as an int argument.
					if (fieldwidth || (flags & has_precision))
						goto default_case;

					fieldwidth = va_arg(args, int);
					break;

				case '.':
					// Precision: either "*" (argument) or digits.
					flags |= has_precision;

					if (str[pos + 1] == '*') {
						pos++;
						precision = va_arg(args, int);
					} else while (str[pos + 1] >= '0' && str[pos + 1] <= '9') {
						precision *= 10;
						precision += str[++pos] - '0';
					}

					break;

				case '-':
					flags |= neg_field;
					break;

				case ' ':
					flags |= leave_blank;
					break;

				case '+':
					flags |= always_sign;
					break;

				case '\'':
					flags |= group_thousands;
					break;

				case 'l':
					// "l" then "ll".
					if (flags & long_arg)
						flags |= long_long_arg;
					else
						flags |= long_arg;

					break;

				case 'h':
					// NOTE(review): this tests long_arg, so "hh" can
					// never set short_short_arg; it almost certainly
					// should test short_arg instead.
					if (flags & long_arg)
						flags |= short_short_arg;
					else
						flags |= short_arg;

					break;

				case 'j':
					flags |= intmax_arg;
					break;

				case 't':
					flags |= ptrdiff_arg;
					break;

				// Note that %z and other such "new" format characters are
				// basically useless because some GCC coder actually went out
				// of their way to make the compiler reject C99 format
				// strings in C++ code, with no way of overriding it that I
				// can find (the source code comments suggest the checking is
				// only when using -pedantic, but I wasn't using -pedantic).
				//
				// Thus, we have the choice of either avoiding %z and friends
				// (and possibly needing to insert hackish casts to silence
				// the compiler's warnings if different architectures define
				// types like size_t in different ways), or not using the
				// format warnings at all.
				//
				// To mitigate this, 32-bit architectures should define
				// pointer-sized special types as "long" rather than "int",
				// so that %lx/%ld can always be used with them.  Fixed-size
				// 32-bit types should be declared as "int" rather than
				// "long" for the same reason.

				case 'z':
					flags |= size_t_arg;
					break;

				case 'd':
				case 'i': {
					// Signed decimal conversion; pick the argument
					// type from the length modifiers seen so far.
					s64 arg;

					if ((flags & intmax_arg) || (flags & long_long_arg))
						arg = va_arg(args, long long);
					else if (flags & size_t_arg)
						arg = va_arg(args, ssize_t);
					else if (flags & ptrdiff_arg)
						arg = va_arg(args, ptrdiff_t);
					else if (flags & long_arg)
						arg = va_arg(args, long);
					else if (flags & short_short_arg)
						arg = (signed char)va_arg(args, int);
					else if (flags & short_arg)
						arg = (short)va_arg(args, int);
					else
						arg = va_arg(args, int);

					flags |= num_signed;
					printf_num(buf, opos, size, arg, 10,
					           fieldwidth, precision, flags);
					state = 0;
					break;
				}

				// The radix for x/o/u is built by fall-through:
				// 'x' sets 18, 'o' subtracts 2 (18 -> 16 for hex,
				// 10 -> 8 for octal), 'u' keeps the result.

				case 'X':
					flags |= capital_hex;
					// fall-through

				case 'x':
					radix = 18;
					// fall-through

				case 'o':
					radix -= 2;
					// fall-through

				case 'u': {
					u64 arg;

					if ((flags & intmax_arg) || (flags & long_long_arg))
						arg = va_arg(args, unsigned long long);
					else if (flags & size_t_arg)
						arg = va_arg(args, size_t);
					else if (flags & ptrdiff_arg)
						arg = va_arg(args, intptr_t);
					else if (flags & long_arg)
						arg = va_arg(args, unsigned long);
					else if (flags & short_short_arg)
						arg = (unsigned char)va_arg(args, unsigned int);
					else if (flags & short_arg)
						arg = (unsigned short)va_arg(args, unsigned int);
					// NOTE(review): the next two branches duplicate the
					// short_short/short tests above and are unreachable
					// dead code.
					else if (flags & short_short_arg)
						arg = (signed char)va_arg(args, int);
					else if (flags & short_arg)
						arg = (short)va_arg(args, int);
					else
						arg = va_arg(args, unsigned int);

					printf_num(buf, opos, size, arg, radix,
					           fieldwidth, precision, flags);
					state = 0;
					break;
				}

				case 'c':
					// Single character (field width is ignored).
					if (opos < size)
						buf[opos] = va_arg(args, int);

					opos++;
					state = 0;
					break;

				case 's': {
					// String; NULL prints "(null)".  Precision and
					// field width are ignored.
					char *arg = va_arg(args, char *);

					if (!arg)
						arg = "(null)";

					size_t len = strlen(arg);
					printf_string(buf, opos, size, arg, len);
					state = 0;
					break;
				}

				case 'p': {
					// Pointer, printed as unprefixed hex.
					void *arg = va_arg(args, void *);

					printf_num(buf, opos, size, (ulong)arg, 16,
					           fieldwidth, precision, flags);

					state = 0;
					break;
				}

				case 'n': {
					// Store the number of characters produced so far
					// into the pointed-to integer.
					if ((flags & intmax_arg) || (flags & long_long_arg))
						*va_arg(args, unsigned long long *) = opos;
					else if (flags & size_t_arg)
						*va_arg(args, ssize_t *) = opos;
					else if (flags & ptrdiff_arg)
						*va_arg(args, ptrdiff_t *) = opos;
					else if (flags & long_arg)
						*va_arg(args, long *) = opos;
					else if (flags & short_short_arg)
						*va_arg(args, signed char *) = opos;
					else if (flags & short_arg)
						*va_arg(args, short *) = opos;
					else
						*va_arg(args, int *) = opos;

					state = 0;
					break;
				}

				default_case: // label for goto
				default:
					// Unknown conversion: emit the character verbatim.
					if (opos < size)
						buf[opos] = str[pos];

					opos++;
					state = 0;
					break;
			}
	}

	// Terminate the buffer if the output was truncated (see the
	// NOTE(review) in the header comment about the non-truncated case).
	if (size > 0 && opos >= size)
		buf[size - 1] = 0;

	return opos;
}
+
+size_t snprintf(char *buf, size_t size, const char *str, ...)
+{
+ va_list args;
+ va_start(args, str);
+ int ret = vsnprintf(buf, size, str, args);
+ va_end(args);
+ return ret;
+}
+
+size_t sprintf(char *buf, const char *str, ...)
+{
+ va_list args;
+ va_start(args, str);
+ int ret = vsnprintf(buf, ULONG_MAX, str, args);
+ va_end(args);
+ return ret;
+}
+
// Byte-wise copy of len bytes from src to dest; the regions must not
// overlap (use memmove for that).  Returns dest.
void *memcpy(void *dest, const void *src, size_t len)
{
	char *d = static_cast<char *>(dest);
	const char *s = static_cast<const char *>(src);

	while (len-- > 0)
		*d++ = *s++;

	return dest;
}
+
// Copy len bytes from src to dest, correctly handling overlapping
// regions.  Returns dest.
//
// BUG FIX: the backward loop used "size_t i = len - 1; i >= 0", which
// is always true for an unsigned index (and underflows for len == 0),
// so it never terminated.
void *memmove(void *dest, const void *src, size_t len)
{
	const char *cs = static_cast<const char *>(src);
	char *cd = static_cast<char *>(dest);

	if (dest < src) {
		// Destination is below the source: copy forwards.
		for (size_t i = 0; i < len; i++)
			cd[i] = cs[i];
	} else {
		// Copy backwards so overlapping bytes are read before they
		// are overwritten.
		for (size_t i = len; i-- > 0;)
			cd[i] = cs[i];
	}

	return dest;
}
+
// Lexicographic comparison of len bytes; returns <0, 0, or >0 as in
// the standard memcmp.
//
// BUG FIX: the loop incremented "pos" both in the for-statement and
// again in the body, so only every other byte was actually compared.
int memcmp(const void *b1, const void *b2, size_t len)
{
	const char *c1 = static_cast<const char *>(b1);
	const char *c2 = static_cast<const char *>(b2);

	for (size_t pos = 0; pos < len; pos++) {
		if (c1[pos] != c2[pos])
			return c1[pos] - c2[pos];
	}

	return 0;
}
+
// Length of the string at s, scanning at most n bytes.
size_t strnlen(const char *s, size_t n)
{
	const char *p = s;

	while (n-- && *p)
		p++;

	return (size_t)(p - s);
}
+
// Length of the NUL-terminated string at s.
size_t strlen(const char *s)
{
	const char *p = s;

	while (*p)
		p++;

	return (size_t)(p - s);
}
+
// Copy the NUL-terminated string at src (including the terminator)
// to dest; returns dest.
char *strcpy(char *dest, const char *src)
{
	char *ret = dest;

	while ((*dest++ = *src++))
		;

	return ret;
}
+
// Standard strncpy: copy at most len bytes of src to dest; if src is
// shorter than len, pad the remainder of dest with zeroes.  Returns
// dest.  Note that dest is NOT NUL-terminated if src is len bytes or
// longer.
//
// BUG FIX: the old loop decremented len with "while (len--)", so when
// the count was exhausted (no NUL seen), len wrapped to SIZE_MAX and
// the trailing bzero() zeroed nearly the whole address space.
char *strncpy(char *dest, const char *src, size_t len)
{
	size_t i = 0;

	// Copy up to len bytes, stopping after the NUL if one is found.
	for (; i < len && src[i]; i++)
		dest[i] = src[i];

	// Zero-fill whatever remains of the destination.
	for (; i < len; i++)
		dest[i] = 0;

	return dest;
}
+
// Zero len bytes starting at b.
void bzero(void *b, size_t len)
{
	char *p = static_cast<char *>(b);

	for (size_t i = 0; i < len; i++)
		p[i] = 0;
}
+
// Fill len bytes starting at b with (the low byte of) ch; returns b.
void *memset(void *b, int ch, size_t len)
{
	char *p = static_cast<char *>(b);

	for (size_t i = 0; i < len; i++)
		p[i] = ch;

	return b;
}
+
+#include <kern/pagealloc.h>
+
+// Temporary hack until slab allocator is added
+
// Page-granular malloc (temporary, until the slab allocator exists):
// rounds the request (plus a size_t header) up to whole pages, records
// the page count in the header, and returns the address just past it.
void *malloc(size_t len)
{
	// Convert the byte count into a page count, reserving room for
	// the size header.
	len = (len + sizeof(size_t) + Arch::page_size - 1) / Arch::page_size;
	Mem::Page *page = Mem::PageAlloc::alloc(len);

	// Stash the page count at the start of the allocation so free()
	// knows how much to give back.
	size_t *ptr = (size_t *)Mem::page_to_kvirt(page);
	*ptr = len;

	return ptr + 1;
}
+
// Release a block obtained from malloc().  A NULL addr is a no-op.
void free(void *addr)
{
	if (addr) {
		// Step back to the size header written by malloc() to
		// recover the page count.
		size_t *ptr = (size_t *)addr;
		ptr--;
		size_t len = *ptr;
		Mem::Page *page = Mem::kvirt_to_page(addr);
		Mem::PageAlloc::free(page, len);
	}
}
+
// Route the global C++ allocation operators through the kernel's
// malloc()/free() above.

void *operator new(size_t len)
{
	return malloc(len);
}

void *operator new[](size_t len)
{
	return malloc(len);
}

void operator delete(void *addr)
{
	free(addr);
}

void operator delete[](void *addr)
{
	free(addr);
}
+
// Called by compiler-generated code if a pure virtual function is ever
// invoked (e.g. from a constructor/destructor); this is always a bug.
extern "C" void __cxa_pure_virtual()
{
	BUG();
}
+
// Kernel abort(): flag that we are in a fault path, log, and trap.
// Does not return.
void abort()
{
	in_fault++;
	printf("abort() called in kernel\n");
	__builtin_trap();
}
--- /dev/null
+../../lib/c++/orb.cc
\ No newline at end of file
--- /dev/null
# Build fragment for mem/: register this directory and its C++ sources
# with the top-level build lists.
DIR := mem/
DIRS += $(DIR)

# FIXME: only include pagetable.cc if the arch wants it
# (BUG FIX: this line used a C++-style "//" comment, which make does
# not recognize and would try to parse as a rule.)
RAW_CXXFILES := pagealloc addrspace orbmm pagetable rmap
CXXFILES += $(RAW_CXXFILES:%=$(DIR)%)
--- /dev/null
+// mem/addrspace.cc -- System.Mem.AddrSpace
+//
+// OPT: Special AddrSpaces that only translate/export a linear block of
+// another AddrSpace, and don't have individual entries for every page.
+//
+// OPT: Special VAreas that use their own translation mechanism instead
+// of varea->offset, so that filesystem block tables (and similar things)
+// don't need to have a VArea per block.
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/mem.h>
+#include <kern/paging.h>
+#include <kern/pagetable.h>
+#include <kern/pagealloc.h>
+#include <kern/generic-pte.h>
+#include <kern/compiler.h>
+
+extern int roshared_start, roshared_page_end;
+extern int rwshared_start, rwshared_page_end;
+
+namespace Mem {
+ extern IMappable physmem;
+
	// Exports the Mem.AddrSpaceFactory ORB interface; creates plain
	// (non-process) address spaces.
	class AddrSpaceFactory {
	public:
		#include <servers/mem/addrspace/Mem/AddrSpaceFactory.h>

		AddrSpaceFactory()
		{
			init_iface();
		}

		// Create a new stacked (non-process) AddrSpace and return it
		// as an IAddrSpace reference in *obj.
		void create(Object *obj)
		{
			*obj = static_cast<IAddrSpace>(*(new AddrSpace(false)));
		}
	};
+
	// Exports the Mem.ProcAddrSpaceFactory ORB interface; creates
	// process address spaces pre-populated with the shared kernel/user
	// regions and a stack.
	class ProcAddrSpaceFactory {
	public:
		#include <servers/mem/addrspace/Mem/ProcAddrSpaceFactory.h>

		ProcAddrSpaceFactory()
		{
			init_iface();
		}

		void create(Object *obj)
		{
			AddrSpace *as = new AddrSpace(true);
			Region region;
			MapFlags mf = 0;
			u64 vstart;

			// Map the read-only shared region (read + exec) at its
			// fixed architecture-defined address.
			region.start = kvirt_to_phys(&roshared_start);
			region.end = kvirt_to_phys(&roshared_page_end);
			vstart = Arch::roshared_map;
			mf.Fixed = 1;
			mf.access_IDLNS_Read = 1;
			mf.access_IDLNS_Exec = 1;

			as->map(physmem, region, &vstart, mf,
			        true, AddrSpace::map_protected);

			// Map the read-write shared region (read + write,
			// copy-on-write, no exec) at its fixed address.
			region.start = kvirt_to_phys(&rwshared_start);
			region.end = kvirt_to_phys(&rwshared_page_end);
			vstart = Arch::rwshared_map;
			mf.access_IDLNS_Exec = 0;
			mf.access_IDLNS_Write = 1;
			mf.CopyOnWrite = 1;

			as->map(physmem, region, &vstart, mf,
			        true, AddrSpace::map_protected);

			// Allocate and map the initial stack between
			// Arch::stack_bottom and Arch::stack_top (inclusive).
			AllocFlags af = 0;
			vstart = Arch::stack_bottom;
			as->alloc_and_map(Arch::stack_top - vstart + 1, &vstart, af, mf);

			*obj = static_cast<IAddrSpace>(*(as));
		}
	};
+
+ ProcAddrSpaceFactory real_proc_addrspace_factory;
+ Factory proc_addr_space_factory = real_proc_addrspace_factory;
+
+ AddrSpaceFactory real_addrspace_factory;
+ Factory addr_space_factory = real_addrspace_factory;
+
	// Construct an address space.  "process" selects a hardware page
	// table (a real process aspace) versus a generic software page
	// table (a stacked aspace).
	AddrSpace::AddrSpace(bool process) : mappable(this)
	{
		init_iface();
		is_process = process;

		// OPT: Allow optional use of the native PTE for stacked aspaces,
		// either because the native PTE is 64-bit, or because it's an
		// embedded system which does not need 64-bit storage.

		if (process)
			page_table = new PageTableImpl<Arch::PTE>(true);
		else
			page_table = new PageTableImpl<GenPTE>(false);

		// Start the free-region search just above the first user page
		// (presumably to keep the lowest page unmapped -- TODO confirm).
		cached_free_region = Arch::user_start + Arch::page_size;
	}
+
+ // This should only be used once during bootup to initialize the
+ // kernel's address space with a static initial page table.
+
	// Bootstrap constructor: wrap an existing statically-allocated
	// top-level page table.  Only used once, for the kernel aspace.
	AddrSpace::AddrSpace(void *ptbl_toplevel) : mappable(this)
	{
		init_iface();
		is_process = true;
		page_table = new PageTableImpl<Arch::PTE>(ptbl_toplevel);

		// FIXME: should be kernel virtual space
		cached_free_region = Arch::user_start + Arch::page_size;
	}
+
	// Return this aspace's IMappable interface, which lets it be mapped
	// into other address spaces.
	void AddrSpace::get_mappable(IMappable *ma)
	{
		*ma = mappable;
	}
+
	// Clone this address space.  Unimplemented: currently always
	// returns a NULL reference.
	void AddrSpace::clone(IAddrSpace *addrspace, uint8_t clone_is_real)
	{
		// FIXME: implement
		*addrspace = NULL;
	}
+
	// Allocate anonymous memory and map it at *vstart (or a chosen
	// address).  Unimplemented stub: currently does nothing.
	void AddrSpace::alloc_and_map(u64 len, u64 *vstart,
	                              AllocFlags aflags, MapFlags mflags)
	{
		// FIXME: implement
	}
+
	// Page-fault entry point: try to resolve a fault at vaddr.
	// Returns true if a mapping was established, false if the fault
	// should be reported to the caller/user.
	bool AddrSpace::handle_fault(ulong vaddr, bool write, bool exec, bool user)
	{
		// If the faulting thread already holds this aspace's lock,
		// recursing into it would deadlock; report failure instead.
		if (lock.held_by_curthread())
			return false;

		// A single access cannot be both a write and an instruction
		// fetch.
		assert(!(write && exec));
		PTEFlags reqflags;

		if (user)
			reqflags.User = 1;

		if (write)
			reqflags.Writeable = 1;
		else if (exec)
			reqflags.Executable = 1;
		else
			reqflags.Readable = 1;

		reqflags.Valid = 1;

		try {
			// Fault in the page with the required permissions.
			mappable.pagein(page_align(vaddr), reqflags);
		}

		catch (BadPageFault &bpf) {
			// FIXME: retain info about nature of bpf
			// to throw to user?
			return false;
		}

		return true;
	}
+
	// Check whether "region" overlaps any existing virtual area.
	// Returns true (with va set to an overlapping area) on overlap or
	// on an invalid region; returns false otherwise, with va set to
	// the nearest preceding area (or NULL if there is none).
	bool AddrSpace::check_overlap(Region region, VirtualArea *&va)
	{
		// An inverted region is treated as an overlap (error).
		if (region.end < region.start)
			return true;

		va = varea_tree.find_nearest(region.start);

		if (!va)
			return false;

		// If region.start is in an existing region, that region will
		// be returned.

		if (region.end >= va->region().start &&
		    region.start <= va->region().end)
			return true;

		// If it returns a region that's greater than region.start, and va
		// itself does not overlap, then prev does not overlap (or else
		// region.start would be in or before prev, and thus prev would
		// have been returned).

		// If it returns a region that's less than region.start, we still
		// need to check next, as region.end could be in (or beyond) that
		// region.

		if (va->list_node.next != &varea_list) {
			VirtualArea *next =
				va->list_node.next->listentry(VirtualArea, list_node);

			if (region.end >= next->region().start &&
			    region.start <= next->region().end)
			{
				va = next;
				return true;
			}
		}

		VirtualArea *prev;

		if (va->list_node.prev != &varea_list)
			prev = va->list_node.prev->listentry(VirtualArea, list_node);
		else
			prev = NULL;

		// Normalize the result so va is the area preceding the region
		// (possibly NULL), as documented above.
		if (region.start < va->region().start) {
			assert(!prev || prev->region().end < region.start);
			va = prev;
		}

		return false;
	}
+
+ VirtualArea *AddrSpace::split_varea(Region region)
+ {
+ VirtualArea *start, *mid, *end;
+
+ // check_overlap is used rather than varea_tree.find,
+ // so that the first overlapping region can be returned,
+ // as most (if not all) callers will need this anyway.
+
+ if (!check_overlap(region, start))
+ return NULL;
+
+ assert(start);
+ assert(start->aspace == this);
+ assert(start->region().end >= region.start);
+
+ if (start->region().start < region.start) {
+ // There is a varea that straddles region.start;
+ // create a new varea "mid" for the overlapping part.
+
+ mid = new VirtualArea;
+
+ mid->aspace = this;
+ mid->region().start = region.start;
+
+ if (region.end > start->region().end)
+ mid->region().end = start->region().end;
+ else
+ mid->region().end = region.end;
+
+ mid->flags = start->flags;
+ mid->ma = start->ma;
+ mid->offset = start->offset;
+
+ if (start->region().end > region.end) {
+ // The varea also straddles region.end; create a new
+ // varea "end" for the other side of the region.
+
+ end = new VirtualArea;
+
+ end->aspace = this;
+ end->region().start = region.end + 1;
+ end->region().end = start->region().end;
+
+ end->flags = start->flags;
+ end->ma = start->ma;
+ end->offset = start->offset;
+ } else {
+ end = NULL;
+ }
+
+ start->region().end = region.start - 1;
+
+ varea_tree.add(mid);
+ mid->ma->map(mid);
+
+ if (end) {
+ // Splits have already been done at both ends of the region,
+ // so there's no need to look up the ending address.
+
+ varea_tree.add(end);
+ mid->ma->map(end);
+ return mid;
+ }
+
+ start = mid;
+ }
+
+ if (start->region().end == region.end)
+ return start;
+
+ if (start->region().end > region.end)
+ end = start;
+ else {
+ end = varea_tree.find(region.end);
+
+ if (!end)
+ return start;
+
+ assert(end->aspace == this);
+ assert(end->region().start <= region.end);
+ assert(end->region().end >= region.end);
+
+ if (end->region().end == region.end)
+ return start;
+ }
+
+ assert(end->region().end > region.end);
+
+ // There is a varea that straddles region.start;
+ // create a new varea "mid" for the overlapping part.
+
+ mid = new VirtualArea;
+
+ mid->aspace = this;
+ mid->region().start = region.start;
+
+ mid->region().start = end->region().start;
+ mid->region().end = region.end;
+
+ mid->flags = start->flags;
+ mid->ma = start->ma;
+ mid->offset = start->offset;
+
+ end->region().start = region.end + 1;
+
+ varea_tree.add(mid);
+ mid->ma->map(mid);
+
+ return start;
+ }
+
	// Find a free, page-aligned region of "len" bytes in user space.
	// On success, fills in "region", sets "prev" to the varea preceding
	// it, advances the cached search position, and returns true.
	//
	// NOTE(review): if region.end overflows past Arch::user_end on the
	// first probe, check_overlap() is never called and "prev" is used
	// uninitialized below; check_overlap() can also leave prev == NULL,
	// making "&prev->list_node" invalid.  Verify callers/conditions
	// guarantee neither can happen.
	bool AddrSpace::get_free_region(ulong len, Region &region,
	                                VirtualArea *&prev)
	{
		assert(page_aligned(len));
		assert(cached_free_region);

		// Fast path: try the cached position first.
		region.start = cached_free_region;
		region.end = cached_free_region + len - 1;

		if (region.end <= Arch::user_end && !check_overlap(region, prev)) {
			cached_free_region = region.end + 1;
			return true;
		}

		// Slow path: walk the varea list from prev, looking for a gap
		// of at least len bytes between consecutive vareas (or between
		// the last varea and the end of user space).
		for (Util::List *node = &prev->list_node; node != &varea_list;
		     node = node->next)
		{
			VirtualArea *va = node->listentry(VirtualArea, list_node);
			ulong end = Arch::user_end;

			if (node->next != &varea_list) {
				VirtualArea *next = node->next->listentry(VirtualArea, list_node);
				end = next->region().start - 1;
			}

			assert(end > va->region().end);

			if (end - va->region().end >= len) {
				region.start = va->region().end + 1;
				region.end = region.start + len - 1;

				assert(page_aligned(region.start));
				cached_free_region = region.end + 1;
				return true;
			}
		}

		// Nothing found above the cached position; restart the search
		// from the bottom of user space once.
		if (cached_free_region != Arch::user_start + Arch::page_size) {
			cached_free_region = Arch::user_start + Arch::page_size;
			return get_free_region(len, region, prev);
		}

		return false;
	}
+
+	// The "mapped" parameter is used to indicate whether the top-level
+ // address space has had a mapping established. If "mapped" is
+ // false, but an exception is not thrown, then this method must
+ // be called again to propagate the mapping along the aspace chain.
+ //
+ // FIXME: Between aspace locks, if aspace's mapping is revoked and
+ // ma->aspace's mapping changes, a pagein could leak through and cause
+ // a page load or a copy-on-write breaking. This isn't a huge deal
+ // (it doesn't affect the correctness of the code or give aspace
+ // access to ma->aspace's new mapping), but it's unpleasant, and could
+ // have an adverse impact on determinism. If you have a real-time
+ // application that can't tolerate the occasional spurious pagein or
+ // copy-on-write breaking, then use an address space that hasn't
+ // previously been exposed to recursive mappers.
+
	// One pass of the recursive pagein: walk down the chain of stacked
	// address spaces from "aspace" until either a mapping is installed
	// (returns true) or the fault is forwarded to a non-aspace mappable
	// or a lower aspace (returns false; see the comment above about the
	// "mapped" return and re-calling).  Throws BadPageFault if no varea
	// covers vaddr or the varea's flags don't satisfy reqflags.
	bool ASpaceMappable::rec_pagein(AddrSpace *aspace, u64 vaddr,
	                                PTEFlags reqflags)
	{
		bool mapped = true;

		// aspace->mappable.retain();

		while (true) {
			Lock::DroppableAutoLock autolock(aspace->lock);
			VirtualArea *va = aspace->varea_tree.find(vaddr);

			if (!va)
				throw BadPageFault();

			if ((va->flags & reqflags) != reqflags)
				throw BadPageFault();

			// If the backing entry is already present, install the
			// mapping in this aspace and stop.
			if (aspace->map(va, vaddr, reqflags))
				break;

			mapped = false;
			Mappable *ma = va->ma;
			// Translate the address into the underlying mappable's
			// space.
			vaddr += va->offset;

			// ma->retain();
			autolock.unlock();
			// aspace->mappable.release();

			if (!ma->is_aspace) {
				// Bottom of the chain: ask the real backing object to
				// page the data in.
				ma->pagein(vaddr, reqflags);
				// ma->release();
				break;
			}

			// Descend into the next stacked address space.
			aspace = static_cast<ASpaceMappable *>(ma)->aspace;
		}

		return mapped;
	}
+
+	void ASpaceMappable::pagein(u64 vaddr, PTEFlags reqflags)
+	{
+		// rec_pagein() returns false when the mapping was only
+		// propagated partway along the aspace chain; keep retrying
+		// until the top-level aspace actually has the mapping.
+		bool done;
+
+		do {
+			done = rec_pagein(aspace, vaddr, reqflags);
+		} while (!done);
+	}
+
+	// Resolve a copy-on-write fault on the page at "vaddr" (currently
+	// backed by physical address "phys").  Both the aspace lock and the
+	// rmap lock must already be held.
+	void AddrSpace::break_copy_on_write(VirtualArea *va, u64 vaddr, u64 phys)
+	{
+		assert(lock.held_by_curthread());
+		assert(rmap_lock.held_by_curthread());
+
+		assert(va->flags.FaultOnWrite);
+		assert(va->aspace == this);
+
+		Page *old_page = phys_to_page(phys);
+
+		Region region = { vaddr, vaddr + Arch::page_size - 1 };
+	
+		// If this is the only reference to the page left, then
+		// nothing needs to be copied.  Just clear the COW condition.
+		if (is_phys_page(old_page) && old_page->get_refcount() == 1) {
+			// Zero both the flag values and the mask explicitly:
+			// set_flags() modifies exactly the bits set in the mask,
+			// so any indeterminate bits here would clobber unrelated
+			// PTE flags with garbage.
+			PTEFlags mask, flags;
+			mask = 0;
+			flags = 0;
+			mask.FaultOnWrite = 1;
+		
+			page_table->set_flags(region, flags, mask);
+			return;
+		}
+		
+		Page *new_page = PageAlloc::alloc(1);
+
+		// FIXME -- highmem
+		// OPT: It'd be better to do this without the rmap_lock held,
+		// especially if rmap_lock is global rather than per-physpage.
+		// I want to keep things simple for now and optimize later,
+		// though.
+
+		memcpy(page_to_kvirt(new_page), phys_to_kvirt(phys),
+		       Arch::page_size);
+
+		// Point all mappings of this page at the private copy, then
+		// drop our allocation reference.
+		page_table->rmap_table.break_copy_on_write(region.start, new_page);
+		new_page->release();
+	}
+
+	void ASpaceMappable::get_entry(u64 vaddr, u64 *phys, PTEFlags *flags)
+	{
+		// An aspace-backed mapping reads entries straight out of the
+		// wrapped address space's page table.
+		PageTable *ptbl = aspace->page_table;
+		ptbl->get_entry(vaddr, phys, flags);
+	}
+
+	bool AddrSpace::map(VirtualArea *va, u64 vaddr, PTEFlags reqflags)
+	{
+		// Copy the backing object's entry for this address into our
+		// page table, restricted by the varea's permissions.  Returns
+		// false when the backer cannot currently satisfy reqflags, in
+		// which case the fault must be handled further up the chain.
+		Lock::AutoLock autolock(rmap_lock);
+		assert(va->aspace == this);
+		
+		u64 phys;
+		PTEFlags flags;
+		va->ma->get_entry(vaddr + va->offset, &phys, &flags);
+		
+		// Effective flags are the intersection of what the backer
+		// grants and what the varea allows; FaultOnWrite is sticky
+		// from either side.
+		PTEFlags newflags = flags & va->flags;
+		newflags.FaultOnWrite = flags.FaultOnWrite | va->flags.FaultOnWrite;
+		
+		if (!newflags.Valid) {
+			assert(va->flags.Valid);
+			return false;
+		}
+		
+		if ((newflags & reqflags) != reqflags)
+			return false;
+		
+		u64 oldphys;
+		PTEFlags oldflags;
+		page_table->get_entry(vaddr, &oldphys, &oldflags);
+		
+		if (oldflags.Valid &&
+		    !(reqflags.Writeable && oldflags.FaultOnWrite))
+		{
+			// If the existing mapping is valid, don't try to map it again.
+			// The existing mapping was put there possibly by a race, but
+			// more likely because a FaultOnWrite was handled upstream.
+			//
+			// FaultOnWrite handling is the only type of mapping change that
+			// can be done directly; all others must change the varea and do
+			// an rmap invalidation instead.  FaultOnWrite is special
+			// because we don't want to split vareas for every page that
+			// gets its copy-on-write broken.
+			
+			assert((oldflags & reqflags) == reqflags);
+			return true;
+		}
+		
+		if (reqflags.Writeable && oldflags.FaultOnWrite)
+		{
+			// The FaultOnWrite needs to be handled upstream.
+			if (!va->flags.FaultOnWrite)
+				return false;
+			
+			va->aspace->break_copy_on_write(va, vaddr, phys);
+		} else {
+			assert(!oldflags.Valid);
+			PageTable *usptbl = NULL;
+			
+			if (va->ma->is_aspace) {
+				ASpaceMappable *asma = static_cast<ASpaceMappable *>(va->ma);
+				usptbl = asma->aspace->page_table;
+			}
+			
+			// Record the reverse mapping, then install the PTE.
+			RMapTable::map(va, usptbl, vaddr, vaddr + va->offset);
+			
+			RegionWithOffset rwo;
+			rwo.start = vaddr;
+			rwo.end = vaddr + Arch::page_size - 1;
+			rwo.offset = phys;
+			
+			page_table->map(rwo, newflags);
+		}
+		
+		return true;
+	}
+
+	// Trivial forwarder: the mappable's size is the aspace's size.
+	void ASpaceMappable::get_size(u64 *size)
+	{
+		aspace->get_size(size);
+	}
+
+	// Map "region" of the mappable "ma" into this address space.
+	// *vstart is the requested virtual start address, or
+	// unspecified_start to let the kernel choose; on return it holds
+	// the address actually used.  Throws InvalidArgument on bad
+	// alignment/addresses, ResourceBusy when a fixed mapping overlaps,
+	// and OutOfSpace when no free region is available.
+	void AddrSpace::map(IMappable ma, Region region, u64 *vstart,
+	                    MapFlags mflags, bool from_kernel, int map_type)
+	{
+		// FIXME: check alignment for VIPT caches
+		// FIXME: Implement the "Replace" map flag
+		
+		if (mflags.Replace)
+			throw_idl(InvalidArgument, 3,
+			          countarray("Replace unimplemented"));
+		
+		Mappable *cma = Mappable::classptr(ma);
+		if (!cma) {
+			// The given IMappable does not refer to a Mappable
+			// of this kernel.
+			
+			throw_idl(InvalidArgument, 0, nullarray);
+		}
+		
+		bool fixed = mflags.Fixed;
+		
+		if (is_process)
+			mflags.Fixed = 1;
+		
+		if (!page_aligned(region.start))
+			throw_idl(InvalidArgument, 1, countarray("unaligned start"));
+		
+		if (!page_aligned(region.end + 1))
+			throw_idl(InvalidArgument, 1, countarray("unaligned end"));
+		
+		Lock::AutoLock autolock(lock);
+		Region vregion;
+		VirtualArea *prev;
+		
+		if (*vstart != System::Mem::AddrSpace_ns::unspecified_start) {
+			vregion.start = *vstart;
+
+			// Regions are inclusive, so a region of the same length
+			// ends at start + (end - start).  The previous "+ 1"
+			// here made the virtual region one byte too long.
+			vregion.end = vregion.start + region.end - region.start;
+		
+			if (is_process) {
+				if (!valid_addr(vregion.start))
+					throw_idl(InvalidArgument, 2,
+					          countarray("invalid virtual start"));
+				
+				if (!valid_addr(vregion.end))
+					throw_idl(InvalidArgument, 2,
+					          countarray("invalid virtual end"));
+			}
+			
+			// If the requested region is taken, fall back to letting
+			// the kernel pick (unless the mapping was Fixed).
+			if (check_overlap(vregion, prev))
+				*vstart = System::Mem::AddrSpace_ns::unspecified_start;
+		}
+		
+		if (*vstart == System::Mem::AddrSpace_ns::unspecified_start) {
+			if (fixed)
+				throw_idl(ResourceBusy, 2, countarray("varea overlap"));
+			
+			if (!get_free_region(region.end - region.start + 1, vregion, prev))
+				throw_idl(OutOfSpace, countarray("out of vspace"));
+			
+			*vstart = vregion.start;
+		}
+		
+		VirtualArea *newva = new VirtualArea;
+		newva->aspace = this;
+		newva->region() = vregion;
+		
+		newva->flags.Valid = 1;
+		newva->flags.User = map_type != map_kernel;
+		newva->flags.Readable = mflags.access_IDLNS_Read;
+		newva->flags.Writeable = mflags.access_IDLNS_Write;
+		newva->flags.Executable = mflags.access_IDLNS_Exec;
+		newva->flags.FaultOnWrite = mflags.CopyOnWrite;
+		newva->flags.Protected = map_type != map_user;
+		newva->ma = cma;
+		newva->offset = region.start - vregion.start;
+		
+		varea_tree.add(newva);
+		newva->ma->map(newva);
+		
+		// Keep the sorted varea list consistent with the tree.
+		if (prev) {
+			prev->list_node.add_front(&newva->list_node);
+		} else {
+			varea_list.add_front(&newva->list_node);
+		}
+	}
+
+	void AddrSpace::unmap(Region region, bool from_kernel)
+	{
+		// Remove all (non-protected, unless from_kernel) mappings in
+		// the region, splitting or truncating vareas at the edges.
+		u64 orig_start = region.start;
+		
+		while (region.start <= region.end) {
+			Lock::DroppableAutoLock autolock(lock);
+			VirtualArea *va;
+			
+			// If check_overlap returns false, then there are no vareas
+			// in the specified region, so there's nothing to unmap.
+			
+			if (!check_overlap(region, va))
+				return;
+			
+			// Protected vareas can only be unmapped from inside the
+			// kernel; skip ahead to the next varea instead.
+			if (va->flags.Protected && !from_kernel) {
+				region.start = va->list_node.next->
+				               listentry(VirtualArea, list_node)->region().start;
+				
+				// NOTE(review): start <= orig_start appears to detect
+				// address wraparound (no further vareas) -- confirm.
+				if (region.start <= orig_start)
+					break;
+				
+				continue;
+			}
+			
+			u64 va_end = va->region().end;
+			u64 next_start = 0;
+			
+			if (va_end > region.end) {
+				// The varea extends past the unmap region: shrink it
+				// from the front...
+				u64 va_start = va->region().start;
+				va->region().start = region.end + 1;
+			
+				// ...and if it also began before the region, recreate
+				// the leading piece as a separate varea.
+				if (va_start < region.start) {
+					VirtualArea *newva = new VirtualArea;
+
+					newva->aspace = this;
+					newva->region().start = va_start;
+					newva->region().end = region.start - 1;
+
+					newva->flags = va->flags;
+					newva->ma = va->ma;
+					newva->offset = va->offset;
+
+					varea_tree.add(newva);
+					newva->ma->map(newva);
+				}
+				
+				VirtualArea *nextva =
+					va->list_node.next->listentry(VirtualArea, list_node);
+
+				next_start = nextva->region().start;
+			} else if (va->region().start < region.start) {
+				// Only the varea's tail is unmapped; truncate it.
+				va->region().end = region.start - 1;
+			} else {
+				// The varea lies entirely inside the region.
+				varea_tree.del(va);
+				va->ma->unmap(va);
+			}
+			
+			// This is done after the varea removal, so that new faults
+			// don't map things in again.
+			
+			// OPT: Skip RMap-based unmapping if nothing maps this aspace.
+			// OPT: Push this loop into the RMap code, allowing it to skip
+			// empty portions of the tables (as the pagetable code currently
+			// does).
+			
+			while (region.start <= va_end && region.start <= region.end) {
+				page_table->rmap_table.unmap(region.start);
+				region.start += Arch::page_size;
+				
+				// Stop on address wraparound.
+				if (region.start <= orig_start)
+					break;
+			}
+			
+			region.start = next_start;
+			
+			// NOTE(review): next_start is zero unless the varea
+			// extended past region.end, so this terminates the outer
+			// loop after a fully-contained varea as well -- confirm
+			// regions spanning multiple vareas behave as intended.
+			if (region.start <= orig_start)
+				break;
+		}
+	}
+
+	// Change the mapping flags of an existing region.  Unimplemented.
+	void AddrSpace::set_mapflags(Region region, MapFlags mflags)
+	{
+		// FIXME: implement
+		// Find varea, split if necessary, propagate change to stacked aspaces
+	}
+
+	// Query the mapping flags of a region (*all_same would report
+	// whether the whole region shares one set of flags).  Unimplemented.
+	void AddrSpace::get_mapflags(Region region, MapFlags *mflags, uint8_t *all_same)
+	{
+		// FIXME: implement
+	}
+
+	// Look up which mappable backs a region and at what offset.
+	// Unimplemented.
+	void AddrSpace::get_mapping(Region region, IMappable *ma, u64 *offset)
+	{
+		// FIXME: implement
+	}
+
+	// Report the architecture's page size to IDL clients.
+	void AddrSpace::get_page_size(u32 *page_size)
+	{
+		*page_size = Arch::page_size;
+	}
+
+	// Report the minimum alignment required for page mappings.
+	void AddrSpace::get_min_align(u32 *min_align)
+	{
+		*min_align = Arch::page_mapping_min_align;
+	}
+
+	// Register a varea as mapping this object, so its mappings can be
+	// found (e.g. for invalidation) later.
+	void Mappable::map(VirtualArea *varea)
+	{
+		mappings_lock.lock_irq();
+		mappings.add_back(&varea->mappings_node);
+		mappings_lock.unlock_irq();
+	}
+
+	// Remove a varea from this object's list of mappings.
+	void Mappable::unmap(VirtualArea *varea)
+	{
+		mappings_lock.lock_irq();
+		varea->mappings_node.del();
+		mappings_lock.unlock_irq();
+	}
+
+	// Tear-down bookkeeping for a PTE that was just replaced or
+	// cleared: invalidate the TLB entry (process aspaces only), record
+	// dirtiness on the backing page, and drop the page reference
+	// unless no_release is set.
+	void PageTable::kill_pte(ulong vaddr, u64 paddr, bool dirty,
+	                         bool valid, bool no_release)
+	{
+		Page *page = phys_to_page(paddr);
+
+		// Only real, allocator-managed memory has a Page struct.
+		if (!is_phys_page(page))
+			page = NULL;
+
+		if (valid && is_process) {
+			Arch::invalidate_tlb_entry(vaddr);
+
+			// First setter of the Dirty flag takes a reference for
+			// the eventual writeback.
+			bool newly_dirty = page && dirty &&
+				!ll_test_and_set(&page->flags, PageFlags::bits::Dirty);
+
+			if (newly_dirty) {
+				page->retain();
+				// Queue page for writeback
+			}
+		}
+
+		if (page && !no_release)
+			page->release();
+	}
+
+ // FIXME: Add a special PTE flag to indicate that PhysMem mappings
+ // don't mess with page refcounts.
+
+	// A Mappable exposing all of physical memory as an identity map.
+	class PhysMem : public Mappable {
+	public:
+		// Size, in pages, of the physical address space addressable
+		// with the machine word size.
+		void get_size(u64 *size)
+		{
+			int addr_bits = (sizeof(long) == 8) ? 64 : 32;
+			*size = 1ULL << (addr_bits - Arch::page_shift);
+		}
+		
+		void pagein(u64 vaddr, PTEFlags reqflags)
+		{
+			// Doesn't need to do anything yet, though it may later
+			// once high memory support is added.
+		}
+		
+		// Identity mapping: the "entry" for an address is the address
+		// itself, with full user access permitted.
+		void get_entry(u64 addr, u64 *phys, PTEFlags *flags)
+		{
+			*phys = addr;
+
+			PTEFlags f;
+			f = 0;
+			f.Valid = 1;
+			f.User = 1;
+			f.Readable = 1;
+			f.Writeable = 1;
+			f.Executable = 1;
+			*flags = f;
+		}
+	};
+
+	// Singleton covering all of physical memory, exported through the
+	// generic IMappable interface.
+	PhysMem real_physmem;
+	IMappable physmem = real_physmem;
+}
+
+#include <servers/mem/addrspace/footer.cc>
--- /dev/null
+class Mem.AddrSpace : System.Mem.AddrSpace;
+class Mem.AddrSpaceFactory : System.Objects.Factory;
+class Mem.ProcAddrSpaceFactory : System.Objects.Factory;
+class Mem.Mappable : System.Mem.Mappable;
--- /dev/null
+// mem/orbmm.cc -- The ORB memory manager for the kernel address space
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/mem.h>
+#include <kern/paging.h>
+
+namespace System {
+namespace RunTime {
+	// The kernel's ORB memory manager instance, reached through the
+	// "orbmm" pointer.
+	ORBMM orbmm_real;
+	ORBMM *orbmm = &orbmm_real;
+
+	ORBMM::ORBMM()
+	{
+		priv = NULL;
+	}
+
+	// Allocate "size" bytes for ORB use.  The group parameter is
+	// accepted but not yet used.  Note: a u8* converts to void*
+	// implicitly; the old reinterpret_cast was unnecessary.
+	void *ORBMM::alloc(size_t size, AllocGroup *group)
+	{
+		return new u8[size];
+	}
+
+	// Region reference counting is not yet implemented.
+	void ORBMM::retain(Region region)
+	{
+		// FIXME
+	}
+
+	void ORBMM::release(Region region)
+	{
+		// FIXME
+	}
+}
+}
--- /dev/null
+// mem/pagealloc.cc -- low-level page allocator
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/kernel.h>
+#include <kern/pagealloc.h>
+#include <kern/paging.h>
+#include <kern/bitops.h>
+#include <kern/list.h>
+#include <kern/mem.h>
+#include <limits.h>
+
+namespace Mem {
+	// Global per-page metadata array (and, presumably, a pointer to
+	// its last entry) -- initialized elsewhere during boot; TODO confirm.
+	Page *pages, *last_page;
+	// Length, in pages, of the free chunk beginning at "start".  Free
+	// chunks link their first and last pages through free.otherside,
+	// so the size is (last - first + 1).
+	inline size_t PageAllocZone::chunk_size(Page *start)
+	{
+		ptrdiff_t npages = start->free.otherside - start + 1;
+		assert(npages >= 1);
+		assert(static_cast<size_t>(npages) <= zonesize);
+		return static_cast<size_t>(npages);
+	}
+
+	// Each bin's list head is a dummy Page in the bins array.
+	inline Page *PageAllocZone::bin_to_head(int bin)
+	{
+		return bins + bin;
+	}
+
+	// Number of pages in a chunk of the given bin: 2^bin.
+	inline uint PageAllocZone::bin_to_size(int bin)
+	{
+		return 1 << bin;
+	}
+
+	// Bin to allocate from for a request of "size" pages: round up so
+	// any chunk in the bin satisfies the request, clamped to the
+	// largest bin.
+	inline int PageAllocZone::size_to_bin_alloc(size_t size)
+	{
+		int bin = ll_get_order_round_up(size);
+		return (bin < num_bins) ? bin : num_bins - 1;
+	}
+
+	// Bin a free chunk of "size" pages belongs in: round down so the
+	// chunk is at least as large as its bin claims, clamped to the
+	// largest bin.
+	inline int PageAllocZone::size_to_bin_free(size_t size)
+	{
+		int bin = ll_get_order_round_down(size);
+		return (bin < num_bins) ? bin : num_bins - 1;
+	}
+
+	// Unlink a free chunk from its bin, maintaining the empty-bin
+	// forwarding pointers used by alloc().
+	inline void PageAllocZone::remove_chunk(Page *chunk, int bin)
+	{
+		chunk->chunk_rmap_list.del();
+		
+		// If bin's list is empty, forward it to the next non-empty bin.
+		
+		Page *head = bin_to_head(bin);
+		if (head->chunk_rmap_list.empty()) {
+			// Propagate the next-larger bin's forward pointer into
+			// this bin, and into any smaller bins that were already
+			// forwarding to this one.
+			Page *newforward = bin_to_head(bin + 1)->free.otherside;
+			assert(head->free.otherside == head);
+			
+			for (Page *p = head;
+			     p - bins >= 0 && p->free.otherside == head; p--)
+				p->free.otherside = newforward;
+		}
+	}
+
+	// Link a free chunk into a bin, repairing the empty-bin forwarding
+	// pointers now that the bin is non-empty.
+	inline void PageAllocZone::add_to_bin(Page *chunk, int bin)
+	{
+		Page *head = bin_to_head(bin);
+		Page *oldforward = head->free.otherside;
+		
+		head->chunk_rmap_list.add_front(&chunk->chunk_rmap_list);
+		
+		// Remove bin's forwarding if it was empty, along with smaller
+		// bins pointing past this bin.
+		
+		if (oldforward != head) {
+			Page *p = head;
+			do {
+				// Bins that forwarded past this one now stop here.
+				p->free.otherside = head;
+				p--;
+			} while (p - bins >= 0 && p->free.otherside == oldforward);
+		}
+	}
+
+ // Break off a piece from the end of the chunk, rather than the start.
+ // This way, only the size of the remaining chunk needs to be adjusted
+ // as long as it still fits in the same bin.
+
+	Page *PageAllocZone::shrink_chunk(Page *start, int num_pages,
+	                                  size_t chunk_size, int bin)
+	{
+		// Pages left in the free chunk after num_pages are carved off
+		// its end.
+		size_t size_left = chunk_size - num_pages;
+		Page *newend = start + size_left - 1;
+		
+		// Relink first<->last pointers of the shrunken chunk.
+		start->free.otherside = newend;
+		newend->free.otherside = start;
+		
+		// Rebin only if the remainder no longer qualifies for its
+		// current bin.
+		if (size_left < bin_to_size(bin)) {
+			remove_chunk(start, bin);
+			add_to_bin(start, size_to_bin_alloc(size_left));
+		}
+		
+		// First page of the piece broken off for the caller.
+		return newend + 1;
+	}
+
+	struct Page *PageAllocZone::alloc(uint num_pages)
+	{
+		// Allocate a contiguous run of num_pages pages from this
+		// zone; returns NULL if nothing large enough is free.
+		Lock::AutoSpinLockRecIRQ autolock(lock);
+		
+		assert(num_pages > 0);
+		assert(num_pages <= bin_to_size(num_bins - 1));
+		
+		// Follow the bin's forwarding pointer to the nearest bin that
+		// actually has chunks (or the last bin, if none do).
+		int bin = size_to_bin_alloc(num_pages);
+		Page *head = bin_to_head(bin)->free.otherside;
+		int realbin = head - bins;
+		Util::List *list = &head->chunk_rmap_list;
+		
+		if (list->empty()) {
+			// Only the last bin can be both empty and un-forwarded.
+			assert(realbin == num_bins - 1);
+			return NULL;
+		}
+		
+		head = list->next->listentry(Page, chunk_rmap_list);
+		
+		assert(head->flags & Page::Free);
+		
+		size_t size = chunk_size(head);
+		assert(size >= bin_to_size(bin));
+		assert(size >= (uint)num_pages);
+		
+		// Take the tail of a larger chunk, or the whole chunk if it
+		// is exactly the right size.
+		if (size != num_pages)
+			head = shrink_chunk(head, num_pages, size, realbin);
+		else
+			remove_chunk(head, realbin);
+		
+		// Mark each allocated page in-use with an initial refcount.
+		for (Page *p = head; p != head + num_pages; p++) {
+			assert(p->flags & Page::Free);
+			assert(!(p->flags & Page::InUse));
+			assert(p->chunk_rmap_list.empty());
+			p->flags = (p->flags & ~Page::Free) | Page::InUse;
+			p->inuse.refcount = 1;
+		}
+		
+		return head;
+	}
+
+	void PageAllocZone::free(struct Page *head, size_t num_pages)
+	{
+		// Return a contiguous run of pages to the zone, coalescing
+		// with free neighbors.
+		Lock::AutoSpinLockRecIRQ autolock(lock);
+		
+		assert(num_pages > 0);
+		
+		for (Page *p = head; p != head + num_pages; p++) {
+			assert(!(p->flags & Page::Free));
+			assert(p->chunk_rmap_list.empty());
+			p->flags = (p->flags & ~Page::InUse) | Page::Free;
+		}
+		
+		// Combine the newly free chunk with any adjacent free chunks,
+		// regardless of what bin they are in.
+		
+		Page *prevpage = head - 1;
+		Page *nextpage = head + num_pages;
+		
+		// Bin the predecessor chunk currently sits in, or -1 if there
+		// was no predecessor to merge with.
+		int bin = -1;
+		
+		if (prevpage - pages >= start && (prevpage->flags & Page::Free)) {
+			// Extend the preceding free chunk over the freed pages;
+			// it stays in its bin for now.
+			Page *prevchunk = prevpage->free.otherside;
+			assert(prevchunk->flags & Page::Free);
+			
+			Page *end = head + num_pages - 1;
+			prevchunk->free.otherside = end;
+			end->free.otherside = prevchunk;
+			
+			size_t prevsize = head - prevchunk;
+			assert(prevsize > 0);
+			assert(prevsize < zonesize);
+			
+			head = prevchunk;
+			num_pages += prevsize;
+			bin = size_to_bin_free(prevsize);
+		}
+		
+		if (nextpage - pages <= end && nextpage->flags & Page::Free) {
+			// Absorb the following free chunk as well.
+			size_t prevsize = chunk_size(nextpage);
+			num_pages += prevsize;
+			Page *end = nextpage->free.otherside;
+			
+			remove_chunk(nextpage, size_to_bin_free(prevsize));
+			
+			end->free.otherside = head;
+			head->free.otherside = end;
+		}
+		
+		int newbin = size_to_bin_free(num_pages);
+		
+		// Rebin only if coalescing changed which bin the chunk
+		// belongs in.
+		if (bin != newbin) {
+			if (bin != -1) {
+				remove_chunk(head, bin);
+				assert(head->free.otherside == head + num_pages - 1);
+			}
+			
+			head->free.otherside = head + num_pages - 1;
+			add_to_bin(head, newbin);
+		}
+	}
+
+	void PageAllocZone::init(uintptr_t base, size_t size)
+	{
+		// base and size are in pages, not bytes.
+		assert(size > 0);
+		assert(base + size <= Arch::mem_end / Arch::page_size);
+		
+		zonesize = size;
+		start = base;
+		end = base + size - 1;
+		
+		// Construct the Page structs covering this zone in place.
+		for (Page *p = &pages[start]; p <= &pages[end]; p++) {
+			assert(p->flags == 0);
+			new(p) Page;
+			p->zone = this;
+		}
+		
+		// All bins start out empty, forwarding to the last bin
+		// (which alloc() treats as the "give up" bin).
+		for (int i = 0; i < num_bins; i++) {
+			Page *bin = bin_to_head(i);
+			
+			bin->free.otherside = bin_to_head(num_bins - 1);
+			bin->chunk_rmap_list.init();
+		}
+	}
+
+	// Try each zone in the NULL-terminated zonelist in order; throw
+	// OutOfMemory if none can satisfy the request.
+	Page *PageAlloc::alloc(uint num_pages, PageAllocZone *const *zonelist)
+	{
+		for (PageAllocZone *const *zone = zonelist; *zone; zone++) {
+			Page *page = (*zone)->alloc(num_pages);
+
+			if (page)
+				return page;
+		}
+		
+		// FIXME: Deliver System.Traps.ReduceMemoryUsage first
+		throw_idl(OutOfMemory);
+	}
+
+	// Convenience wrapper: release this single page to the allocator.
+	void Page::free_page()
+	{
+		PageAlloc::free(this, 1);
+	}
+	
+	PageAlloc page_alloc;
+}
--- /dev/null
+class Mem.PageAlloc : System.Mem.Allocator;
+class Mem.PageAllocHandle : System.Mem.AllocHandle;
--- /dev/null
+// mem/pagetable.cc -- Generic page table implementation
+// Most architectures should be able to use this as is, though
+// architectures with weird paging hardware can provide their own implementation.
+//
+// OPT: Dynamically adjust the number of pagetable levels for PTEs that
+// support it (mainly for generic-pte).
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/mem.h>
+#include <kern/pagealloc.h>
+#include <kern/pagetable.h>
+#include <kern/generic-pte.h>
+#include <lowlevel/atomic.h>
+#include <util/misc.h>
+
+namespace Mem {
+ PageTable *kernel_page_table;
+
+ // The architecture must specify at least one data structure
+ // representing one PTE. Non-directory PTEs must support the |
+ // operation, either through overloading or providing a constructor and
+ // conversion operator for an integral type. It is assumed that a PTE
+ // of zero (by using bzero on the table) is a reasonable invalid PTE,
+ // but a PTE-specific bulk zero method could be added if necessary.
+ //
+ // Eventually multiple PTE formats will need to be supported in
+ // order to dynamically choose between PAE and non-PAE on 32-bit
+ // x86. When that happens, the arch will instantiate its choice
+ // of PageTableImpl template rather than export Arch::PTE.
+
+ // A PTE must support typedefs PTE::PhysAddr and PTE::VirtAddr which
+	// refer to integer types of the same size as the supported physical
+ // and virtual addresses, respectively...
+
+ // A PTE must support a typedef PTE::DirPTE which is used for
+ // non-final page tables. DirPTE may be the same as PTE. DirPTE must
+ // support valid_pte, set_pte, and shift_per_level as in a normal PTE,
+ // and must implement the following methods:
+ //
+ // void *get_table()
+ //
+ // A function to return the virtual address of the table pointed to
+ // by this DirPTE entry.
+ //
+ // static DirPTE set_table(void *table)
+ //
+ // A function to return a DirPTE pointing to the specified virtual
+ // address.
+
+ // A normal PTE must support the following methods:
+ //
+ // static void flags_to_pte(Mem::PTEFlags flagsin,
+ // Mem::PTEFlags maskin,
+ // PTE &flagsout,
+ // PTE &maskout)
+ //
+ // A function to turn Mem::PTEFlags into a PTE. It also produces
+ // a mask which can be used to produce a new pte by calling
+ // oldpte.set_flags(mask, newpteflags).
+ //
+ // PTE set_flags(PTE mask, PTE flags)
+ //
+ // Apply the flags to the PTE according to the provided mask.
+ //
+ // Mem::PTEFlags pte_to_flags()
+ //
+ // A function to turn a PTE into Mem::PTEFlags
+ //
+ // static uint addr_to_offset(VirtAddr addr, int shift)
+ //
+ // A function to take a virtual address and a shift level and return
+ // the offset into the page table in entries.
+ //
+ // PhysAddr pte_to_addr()
+ //
+ // A function to take a PTE and return the physical address contained
+ // therein.
+ //
+ // static PTE addr_to_pte(PhysAddr phys)
+ //
+ // A function to take a physical address and return a PTE with that
+ // address and no flags set.
+ //
+ // bool valid_pte()
+ // bool dirty_pte()
+ //
+ // A function to return whether the PTE is valid/dirty or not. This
+ // is a shortcut to keep from having to do a pte_to_flags repeatedly.
+ // It would have been slightly cleaner to make this a method of PTE,
+ // but that would require that PTE be implemented as a complex type,
+ // and I'd rather leave that up to the architecture.
+ //
+ // void set_pte(PTE *table, uint offset)
+ //
+ // A function to set a PTE in a page table. Normally, this is just
+ // a simple assignment, but some architectures may need to do something
+ // unusual (such as ensure atomicity if the PTE size is greater than
+ // the word size).
+ //
+ // PTE xchg_pte(PTE *table, uint offset)
+ //
+ // As set_pte, but atomically reads the old PTE while setting the
+ // new PTE, and returns the old one.
+
+ // A PTE must have the following constants:
+ //
+ // shift_per_level:
+ // The number of bits of virtual address space represented by one
+ // level of page tables. This is log2(number of pages per table).
+ //
+ // num_levels:
+ // The number of page table levels; this is used, but not imported,
+ // due to a conflict with PageTable::num_levels.
+ //
+ // page_size: the size of a page
+ // page_shift: log2(page_size)
+ //
+ // kmap_start, kmap_end:
+ // The kernel mappings portion of the address space is mapped into all
+ // address spaces, using shared page tables. This sharing occurs at
+ // the top-level page table (hopefully this will work for all
+ // architectures; it can be made configurable, at the cost of some
+ // complexity). kmap_start and kmap_end are indices into the top
+ // level page table that define which region is shared. These are only
+ // relevant for process address spaces.
+
+ using Util::round_up;
+
+	template<typename PTE>
+	void PageTableImpl<PTE>::end_map(RegionWithOffset region, PTE flags,
+	                                 void *table)
+	{
+		// Fill leaf PTEs covering [region.start, region.end] with the
+		// physical range beginning at region.offset.
+		uint start = PTE::addr_to_offset(region.start, PTE::page_shift);
+		uint end = PTE::addr_to_offset(region.end, PTE::page_shift);
+		
+		Page *page = kvirt_to_page(table);
+		
+		assert(start < pages_per_table());
+		assert(end < pages_per_table());
+		assert(page->flags & Page::InUse);
+		
+		PTE *ptable = static_cast<PTE *>(table);
+		
+		for (uint i = start; i <= end; i++) {
+			PTE newpte = PTE::addr_to_pte(region.offset) | flags;
+			PTE oldpte = newpte.xchg_pte(ptable, i);
+			
+			retain_if_phys(region.offset);
+			
+			if (oldpte) {
+				// Replacing an existing entry: release its references.
+				// vaddr is only for process aspaces, so don't worry
+				// about the ulong.
+				
+				ulong vaddr = (ulong)region.start +
+				              ((i - start) << PTE::page_shift);
+				
+				kill_pte(vaddr, oldpte.pte_to_addr(),
+				         oldpte.dirty_pte(), oldpte.valid_pte());
+			} else {
+				// Each newly-used entry pins the table itself.
+				page->retain();
+			}
+			
+			region.offset += PTE::page_size;
+		}
+	}
+
+	template<typename PTE>
+	void PageTableImpl<PTE>::rec_map(RegionWithOffset region, PTE flags,
+	                                 void *table, int shift)
+	{
+		// Below the last directory level, "table" holds leaf PTEs.
+		if (shift < lastlevel_shift) {
+			assert(shift + DirPTE::shift_per_level - PTE::shift_per_level == PTE::page_shift);
+			end_map(region, flags, table);
+			return;
+		}
+	
+		Page *page = kvirt_to_page(table);
+	
+		DirPTE *dtable = static_cast<DirPTE *>(table);
+		uint start = DirPTE::addr_to_offset(region.start, shift);
+		uint end = DirPTE::addr_to_offset(region.end, shift);
+		u64 orig_end = region.end;
+		
+		assert(start < pages_per_dtable());
+		assert(end < pages_per_dtable());
+		assert(page->flags & Page::InUse);
+		
+		// If the region spans several directory entries, clip it to
+		// the first entry's boundary; the loop advances it per entry.
+		if (start != end)
+			region.end = round_up(region.start + 1, shift) - 1;
+		
+		for (uint i = start; i <= end; i++) {
+			void *subtable;
+			
+			if (!dtable[i].valid_pte()) {
+				// Allocate and zero a new lower-level table; each
+				// valid entry pins this table via its refcount.
+				subtable = Mem::alloc_pages(1);
+				bzero(subtable, PTE::page_size);
+				DirPTE newpte = DirPTE::set_table(subtable);
+				newpte.set_pte(dtable, i);
+				page->retain();
+			} else {
+				subtable = dtable[i].get_table();
+			}
+			
+			rec_map(region, flags, subtable, shift - DirPTE::shift_per_level);
+			
+			// Advance to the next directory entry's subregion.
+			region.offset += region.end - region.start + 1;
+			region.start = region.end + 1;
+			
+			if (i + 1 == end)
+				region.end = orig_end;
+			else
+				region.end += 1UL << shift;
+		}
+	}
+
+	template <typename PTE>
+	void PageTableImpl<PTE>::end_unmap(Region region, void *table)
+	{
+		// Clear leaf PTEs covering [region.start, region.end],
+		// releasing page and page-table references as entries go away.
+		Page *page = kvirt_to_page(table);
+		uint start = PTE::addr_to_offset(region.start, PTE::page_shift);
+		uint end = PTE::addr_to_offset(region.end, PTE::page_shift);
+		
+		assert(start < pages_per_table());
+		assert(end < pages_per_table());
+		assert(page->flags & Page::InUse);
+		
+		PTE *ptable = static_cast<PTE *>(table);
+		
+		for (uint i = start; i <= end; i++) {
+			if (ptable[i]) {
+				// Atomically swap in an empty PTE.
+				PTE oldpte = PTE().xchg_pte(ptable, i);
+				
+				if (oldpte) {
+					// vaddr is only for process aspaces, so don't worry
+					// about the ulong.
+					
+					ulong vaddr = (ulong)region.start +
+					              ((i - start) << PTE::page_shift);
+					
+					kill_pte(vaddr, oldpte.pte_to_addr(),
+					         oldpte.dirty_pte(), oldpte.valid_pte());
+				}
+				
+				// Drop the reference this entry held on the table.
+				assert(page->inuse.refcount > 1);
+				page->release();
+			}
+		}
+	}
+
+	template <typename PTE>
+	void PageTableImpl<PTE>::rec_unmap(Region region, void *table, int shift)
+	{
+		// Below the last directory level, "table" holds leaf PTEs.
+		if (shift < lastlevel_shift) {
+			assert(shift + DirPTE::shift_per_level - PTE::shift_per_level == PTE::page_shift);
+			end_unmap(region, table);
+			return;
+		}
+	
+		Page *page = kvirt_to_page(table);
+		uint start = DirPTE::addr_to_offset(region.start, shift);
+		uint end = DirPTE::addr_to_offset(region.end, shift);
+		u64 orig_end = region.end;
+		
+		assert(start < pages_per_dtable());
+		assert(end < pages_per_dtable());
+		assert(page->flags & Page::InUse);
+		
+		DirPTE *dtable = static_cast<DirPTE *>(table);
+		
+		// Clip a multi-entry region to the first entry's boundary;
+		// the loop advances it per entry.
+		if (start != end)
+			region.end = round_up(region.start + 1, shift) - 1;
+		
+		for (uint i = start; i <= end; i++) {
+			if (dtable[i].valid_pte()) {
+				void *subtable = dtable[i].get_table();
+				
+				rec_unmap(region, subtable, shift - DirPTE::shift_per_level);
+				
+				Page *subpage = kvirt_to_page(subtable);
+				assert(subpage->flags & Page::InUse);
+				assert(subpage->inuse.refcount > 0);
+				
+				// Refcount 1 means only our directory entry still
+				// references the subtable: free it and drop this
+				// table's corresponding reference.
+				if (subpage->inuse.refcount == 1) {
+					DirPTE().set_pte(dtable, i);
+					subpage->release();
+				
+					assert(page->inuse.refcount > 1);
+					page->release();
+				}
+			}
+			
+			region.start = region.end + 1;
+			
+			if (i + 1 == end)
+				region.end = orig_end;
+			else
+				region.end += 1UL << shift;
+		}
+	}
+
+	template <typename PTE>
+	void PageTableImpl<PTE>::end_set_flags(Region region, PTE flags,
+	                                       PTE mask, void *table)
+	{
+		// Update the flag bits selected by "mask" on each present
+		// leaf PTE in the region.  Refcounts are untouched: kill_pte
+		// is called with no_release = true.
+		uint start = PTE::addr_to_offset(region.start, PTE::page_shift);
+		uint end = PTE::addr_to_offset(region.end, PTE::page_shift);
+		
+		assert(start < pages_per_table());
+		assert(end < pages_per_table());
+		
+		PTE *ptable = static_cast<PTE *>(table);
+		
+		for (uint i = start; i <= end; i++) {
+			if (ptable[i]) {
+				PTE oldpte = ptable[i].set_flags(mask, flags);
+				
+				// vaddr is only for process aspaces, so don't worry
+				// about the ulong.
+				
+				ulong vaddr = (ulong)region.start +
+				              ((i - start) << PTE::page_shift);
+				
+				kill_pte(vaddr, oldpte.pte_to_addr(),
+				         oldpte.dirty_pte(), oldpte.valid_pte(), true);
+			}
+		}
+	}
+
+	template <typename PTE>
+	void PageTableImpl<PTE>::rec_set_flags(Region region, PTE flags,
+	                                       PTE mask, void *table,
+	                                       int shift)
+	{
+		// Below the last directory level, "table" holds leaf PTEs,
+		// not DirPTEs, so hand off to end_set_flags() just as
+		// rec_map()/rec_unmap() hand off to end_map()/end_unmap().
+		// (Previously this only clamped "shift" and fell through to
+		// the DirPTE walk below, misinterpreting leaf PTEs as
+		// directory entries and recursing without terminating.)
+		if (shift < lastlevel_shift) {
+			assert(shift + DirPTE::shift_per_level - PTE::shift_per_level == PTE::page_shift);
+			end_set_flags(region, flags, mask, table);
+			return;
+		}
+		
+		uint start = DirPTE::addr_to_offset(region.start, shift);
+		uint end = DirPTE::addr_to_offset(region.end, shift);
+		u64 orig_end = region.end;
+		
+		assert(start < pages_per_dtable());
+		assert(end < pages_per_dtable());
+		
+		DirPTE *dtable = static_cast<DirPTE *>(table);
+		
+		// Clip a multi-entry region to the first entry's boundary;
+		// the loop advances it per entry.
+		if (start != end)
+			region.end = round_up(region.start + 1, shift) - 1;
+		
+		for (uint i = start; i <= end; i++) {
+			if (dtable[i].valid_pte()) {
+				void *subtable = dtable[i].get_table();
+
+				rec_set_flags(region, flags, mask, subtable,
+				              shift - DirPTE::shift_per_level);
+			}
+			
+			region.start = region.end + 1;
+			
+			if (i + 1 == end)
+				region.end = orig_end;
+			else
+				region.end += 1UL << shift;
+		}
+	}
+
+	// Map "region" with the given generic flags, converting them to
+	// the PTE representation and walking the tree from the top.
+	template <typename PTE>
+	void PageTableImpl<PTE>::map(RegionWithOffset region, Flags flags)
+	{
+		Lock::AutoLock held(lock);
+
+		PTE newpte, unused_mask;
+		PTE::flags_to_pte(flags, ~0UL, newpte, unused_mask);
+
+		rec_map(region, newpte,
+		        static_cast<PTE *>(toplevel), toplevel_shift);
+	}
+
+	// Remove every leaf mapping inside "region", releasing page
+	// tables that become empty along the way.
+	template <typename PTE>
+	void PageTableImpl<PTE>::unmap(Region region)
+	{
+		Lock::AutoLock held(lock);
+		rec_unmap(region, static_cast<PTE *>(toplevel), toplevel_shift);
+	}
+
+	// Change only the flag bits selected by "mask" on every present
+	// mapping inside "region"; other bits are left untouched.
+	template <typename PTE>
+	void PageTableImpl<PTE>::set_flags(Region region, Flags flags, Flags mask)
+	{
+		Lock::AutoLock held(lock);
+
+		PTE newpte, newmask;
+		PTE::flags_to_pte(flags, mask, newpte, newmask);
+
+		rec_set_flags(region, newpte, newmask,
+		              static_cast<PTE *>(toplevel), toplevel_shift);
+	}
+
+	// Look up the physical address and flags for a single virtual
+	// address.  If any directory level is absent, *flags is returned
+	// with Valid clear and *phys is left unmodified.
+	template <typename PTE>
+	void PageTableImpl<PTE>::get_entry(u64 addr, u64 *phys, Flags *flags)
+	{
+		Lock::AutoLock autolock(lock);
+		int shift = toplevel_shift;
+
+		void *table = toplevel;
+
+		// Descend through the directory levels...
+		while (shift >= lastlevel_shift) {
+			DirPTE *dtable = static_cast<DirPTE *>(table);
+			DirPTE dpte = dtable[DirPTE::addr_to_offset(addr, shift)];
+
+			if (!dpte.valid_pte()) {
+				flags->Valid = 0;
+				return;
+			}
+
+			table = dpte.get_table();
+			shift -= DirPTE::shift_per_level;
+		}
+
+		// ...then read the leaf PTE.
+		assert(shift + DirPTE::shift_per_level - PTE::shift_per_level == PTE::page_shift);
+
+		PTE *ptable = static_cast<PTE *>(table);
+
+		uint off = PTE::addr_to_offset(addr, PTE::page_shift);
+
+		*phys = ptable[off].pte_to_addr();
+		*flags = ptable[off].pte_to_flags();
+	}
+
+	// Construct a page table.  For process address spaces the kernel
+	// window [kmap_start, kmap_end] of the toplevel table is copied
+	// from kernel_page_table (so kernel mappings are shared) and the
+	// user entries on either side are zeroed; for non-process tables
+	// the whole toplevel page is zeroed.
+	template <typename PTE>
+	PageTableImpl<PTE>::PageTableImpl(bool process) : PageTable(process)
+	{
+		toplevel = Mem::alloc_pages(1);
+		PTE *table = static_cast<PTE *>(toplevel);
+
+		if (is_process) {
+			num_levels = PTE::num_levels;
+
+			// Zero the user entries below and above the shared
+			// kernel window.
+			if (PTE::kmap_start != 0)
+				bzero(table, PTE::kmap_start * sizeof(PTE));
+
+			if (PTE::kmap_end != pages_per_dtable() - 1)
+				bzero(table + PTE::kmap_end + 1,
+				      (pages_per_dtable() - PTE::kmap_end - 1) * sizeof(PTE));
+
+			PTE *ktable = static_cast<PTE *>(kernel_page_table->toplevel);
+
+			memcpy(table + PTE::kmap_start, ktable + PTE::kmap_start,
+			       (PTE::kmap_end - PTE::kmap_start + 1) * sizeof(PTE));
+		} else {
+			// FIXME: growable levels
+			num_levels = PTE::num_levels;
+			bzero(table, PTE::page_size);
+		}
+
+		// Leaf tables index at page_shift; the level just above the
+		// leaves adds PTE::shift_per_level, and every further level
+		// adds DirPTE::shift_per_level.
+		toplevel_shift = lastlevel_shift = PTE::page_shift;
+
+		if (num_levels > 1) {
+			lastlevel_shift += PTE::shift_per_level;
+			toplevel_shift += PTE::shift_per_level +
+			                  (num_levels - 2) * DirPTE::shift_per_level;
+		}
+	}
+
+	// Wrap an existing toplevel table as the (single) kernel page
+	// table.  Must run exactly once, before any process page table is
+	// created from it.
+	template <typename PTE>
+	PageTableImpl<PTE>::PageTableImpl(void *table) : PageTable(true)
+	{
+		assert(!kernel_page_table);
+
+		toplevel = table;
+		num_levels = PTE::num_levels;
+		kernel_page_table = this;
+	}
+
+	// Destroy the page table, unmapping everything it owns and
+	// freeing the toplevel page.  The shared kernel window
+	// [kmap_start, kmap_end] belongs to kernel_page_table and is
+	// deliberately left alone for process tables.
+	template <typename PTE>
+	PageTableImpl<PTE>::~PageTableImpl()
+	{
+		assert(this != kernel_page_table);
+
+		if (is_process) {
+			// The user region below the kernel window runs up to
+			// the last byte before entry kmap_start, i.e.
+			// (kmap_start << toplevel_shift) - 1.  The previous
+			// expression ((kmap_start - 1) << toplevel_shift)
+			// ended at the *first* byte of the last user entry,
+			// leaking everything mapped in the rest of it.
+			Region region1 = { 0, ((VirtAddr)PTE::kmap_start << toplevel_shift) - 1 };
+			Region region2 = { ((VirtAddr)PTE::kmap_end + 1) << toplevel_shift, ~0UL };
+
+			if (PTE::kmap_start != 0)
+				unmap(region1);
+			if (PTE::kmap_end != pages_per_dtable() - 1)
+				unmap(region2);
+		} else {
+			Region region = { 0, ~0UL };
+			unmap(region);
+		}
+
+		// All subtables must have been released above; only the
+		// toplevel page itself may still hold a reference.
+		Page *page = kvirt_to_page(toplevel);
+		assert(page->flags & Page::InUse);
+		assert(page->inuse.refcount == 1);
+
+		Mem::free_pages(toplevel, 1);
+	}
+
+ template class PageTableImpl<Arch::PTE>;
+ template class PageTableImpl<GenPTE>;
+}
--- /dev/null
+// mem/rmap.cc -- Reverse mapping from physical page frames (or
+// intermediate address spaces) to mappers.
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/mem.h>
+#include <kern/pagealloc.h>
+#include <util/misc.h>
+
+namespace Mem {
+ using Util::round_up;
+	// static uint rmaps_per_page = Arch::page_size / sizeof(RMapNode);
+
+	// Log2 of the RMapNodes held by one page.  If RMapNode's length
+	// becomes something other than 8 longs, change "3" to the base-2
+	// log of the number of longs.
+
+	static int rmap_shift = Arch::page_shift - _LL_LONG_LOGBYTES - 3;
+
+	// static int rmap_dirs_per_page = Arch::page_size / sizeof(RMapNode *);
+	// Log2 of the directory pointers held by one page.
+	static int rmap_dir_shift = Arch::page_shift - _LL_LONG_LOGBYTES;
+	// Address bits covered by a single leaf table of RMapNodes.
+	static int rmap_lastlevel_shift = rmap_shift + Arch::page_shift;
+
+	// Directory levels needed to span a full 64-bit address space.
+	static int rmap_dir_levels = (64 - rmap_lastlevel_shift - 1)
+	                             / rmap_dir_shift;
+
+	// Shift of a fully-grown (maximum-height) rmap table.
+	static int rmap_toplevel_shift = rmap_dir_shift * rmap_dir_levels
+	                                 + rmap_lastlevel_shift;
+
+	// Index of addr within a directory table at the given shift.
+	static inline u64 addr_to_dir_offset(u64 addr, int shift)
+	{
+		return (addr >> shift) & ((1ULL << rmap_dir_shift) - 1);
+	}
+
+	// Index of addr's RMapNode within its leaf table.
+	static inline u64 addr_to_offset(u64 addr)
+	{
+		return (addr >> Arch::page_shift) & ((1ULL << rmap_shift) - 1);
+	}
+
+	// Create an empty rmap table with a single (zeroed) leaf-level
+	// page as its top; get_rmap() grows it upward on demand.
+	RMapTable::RMapTable()
+	{
+		// All RMap tables must have at least one dir level, in order to
+		// simplify the code.  If it turns out that a lot of memory is
+		// wasted due to this, the code could be made more complex in order
+		// to allow one-level rmap tables.  Currently, on 4KiB-page systems,
+		// a page is wasted per under-512KiB aspace (32-bit) or under-256KiB
+		// aspace (64-bit).
+		//
+		// Dynamic levels would have to be implemented in generic-pte for
+		// the wastage here to be meaningful.
+
+		toplevel_shift = rmap_lastlevel_shift;
+		toplevel = Mem::alloc_pages(1);
+		bzero(toplevel, Arch::page_size);
+	}
+
+	// Return the RMapNode for virtaddr.  With add == true the table is
+	// grown upward (extra toplevel levels) as needed; with add == false
+	// NULL is returned if the table does not reach that address.
+	//
+	// NOTE(review): only the grow-the-top path checks "add"; missing
+	// *interior* tables are allocated unconditionally in the descent
+	// loop below.  Presumably callers with add == false expect the
+	// mapping to exist already -- confirm before relying on it.
+	RMapNode *RMapTable::get_rmap(u64 virtaddr, bool add)
+	{
+		assert(rmap_lock.held_by_curthread());
+		int shift = toplevel_shift;
+		void *table = toplevel;
+
+		// Grow the tree upward until its coverage includes virtaddr;
+		// the old toplevel becomes entry 0 of each new toplevel.
+		while (toplevel_shift < rmap_toplevel_shift &&
+		       (virtaddr >> (toplevel_shift + rmap_dir_shift)))
+		{
+			if (!add)
+				return NULL;
+
+			shift += rmap_dir_shift;
+			toplevel_shift += rmap_dir_shift;
+
+			toplevel = Mem::alloc_pages(1);
+			bzero(toplevel, Arch::page_size);
+
+			static_cast<void **>(toplevel)[0] = table;
+			table = toplevel;
+		}
+
+		// Descend the directory levels, allocating missing tables.
+		while (shift >= rmap_lastlevel_shift) {
+			int off = addr_to_dir_offset(virtaddr, shift);
+			void *new_table = static_cast<void **>(table)[off];
+
+			if (!new_table) {
+				new_table = Mem::alloc_pages(1);
+				bzero(new_table, Arch::page_size);
+				static_cast<void **>(table)[off] = new_table;
+			}
+
+			table = new_table;
+			shift -= rmap_dir_shift;
+		}
+
+		assert(shift + rmap_dir_shift - rmap_shift == Arch::page_shift);
+
+		int off = addr_to_offset(virtaddr);
+		return &static_cast<RMapNode *>(table)[off];
+	}
+
+	// Record that dsva (the downstream VirtualArea) maps the page at
+	// dsvaddr, derived from usptbl's mapping at usvaddr (the
+	// upstream).  The new node is linked into the upstream's rmap
+	// list so downstream mappings can be found when the upstream
+	// changes; usptbl may be NULL for a direct physical-page mapping.
+	void RMapTable::map(VirtualArea *dsva, PageTable *usptbl,
+	                    u64 dsvaddr, u64 usvaddr)
+	{
+		RMapNode *dsrmap = dsva->aspace->page_table->
+		                   rmap_table.get_rmap(dsvaddr, true);
+
+		assert(!dsrmap->va);
+		dsrmap->va = dsva;
+		dsrmap->vaddr = page_align(dsvaddr);
+		dsrmap->head.init();
+		dsrmap->tail.init();
+
+		if (usptbl) {
+			RMapNode *usrmap = usptbl->rmap_table.get_rmap(usvaddr);
+			assert(usrmap);
+			assert(usrmap->va->aspace->page_table == usptbl);
+
+			usrmap->head.add_front(&dsrmap->head);
+		} else {
+			// FIXME: If it ends up being useful, link into the phys-page
+			// rmap list.
+		}
+
+		// The tail bracket closes this node's own downstream sublist.
+		dsrmap->head.add_front(&dsrmap->tail);
+	}
+
+	// Unmap the page at "virtaddr" in this table's address space and,
+	// transitively, every downstream mapping derived from it.
+	void RMapTable::unmap(u64 virtaddr)
+	{
+		Lock::AutoLock autolock(rmap_lock);
+		RMapNode *head = get_rmap(virtaddr);
+
+		if (!head || !head->va)
+			return;
+
+		assert(head->vaddr == virtaddr);
+
+		Util::ListNoAutoInit *node = &head->head, *oldnode;
+
+		do {
+			// Each RMapNode contributes two links (head and tail);
+			// which one this is can be recovered from its offset
+			// within the RMapNode.
+			ulong off = reinterpret_cast<ulong>(node) & (sizeof(RMapNode) - 1);
+			if (off == RMapNode::head_offset) {
+				RMapNode *rmap = node->listentry(RMapNode, head);
+
+				// Unmap one full page.  This was previously
+				// "vaddr + Arch::page_shift - 1", which covered
+				// only page_shift bytes rather than a page.
+				Region region = { rmap->vaddr,
+				                  rmap->vaddr + Arch::page_size - 1 };
+
+				rmap->va->aspace->page_table->unmap(region);
+				rmap->va = NULL;
+			} else {
+				assert(off == RMapNode::tail_offset);
+			}
+
+			oldnode = node;
+			node = node->next;
+			oldnode->del();
+		} while (node != &head->tail);
+
+		node->del();
+	}
+
+	// Re-point this mapping (and its downstream mappings) at
+	// new_page after a copy-on-write fault, clearing FaultOnWrite
+	// where appropriate.  Caller must hold rmap_lock.
+	void RMapTable::break_copy_on_write(u64 virtaddr, Page *new_page)
+	{
+		assert(rmap_lock.held_by_curthread());
+		RMapNode *head = get_rmap(virtaddr);
+		RMapNode *still_cow = NULL;
+
+		assert(head && head->va);
+		assert(head->vaddr == virtaddr);
+
+		// If there are sibling or upstream mappings of this page,
+		// detach the rmap list.
+
+		if (head->head.prev != &head->tail) {
+			head->head.prev->next = head->tail.next;
+			head->tail.next->prev = head->head.prev;
+
+			head->head.prev = &head->tail;
+			head->tail.next = &head->head;
+		}
+
+		assert(head->tail.next == &head->head);
+		Util::ListNoAutoInit *node = &head->head;
+
+		do {
+			ulong off = reinterpret_cast<ulong>(node) & (sizeof(RMapNode) - 1);
+			if (off == RMapNode::head_offset) {
+				RMapNode *rmap = node->listentry(RMapNode, head);
+				RegionWithOffset region;
+
+				// Remap one full page at the new physical frame.
+				// This was previously "vaddr + Arch::page_shift - 1",
+				// which covered only page_shift bytes, not a page.
+				region.start = rmap->vaddr;
+				region.end = rmap->vaddr + Arch::page_size - 1;
+				region.offset = page_to_phys(new_page);
+
+				PTEFlags flags = rmap->va->flags;
+
+				// The faulting mapping always has PTE FaultOnWrite cleared;
+				// downstream mappings have PTE FaultOnWrite cleared if they
+				// are not downstream of different mapping with VA
+				// FaultOnWrite set.  Downstream mappings should never have
+				// PTE FaultOnWrite clear if VA FaultOnWrite is set; if the
+				// downstream mapping had been cow-broken, it would have been
+				// removed from this physpage's rmap list.
+
+				if (flags.FaultOnWrite && node != &head->head && !still_cow)
+					still_cow = rmap;
+
+				if (still_cow)
+					flags.FaultOnWrite = 1;
+				else
+					flags.FaultOnWrite = 0;
+
+				rmap->va->aspace->page_table->map(region, flags);
+			} else {
+				assert(off == RMapNode::tail_offset);
+
+				if (still_cow) {
+					RMapNode *rmap = node->listentry(RMapNode, tail);
+
+					// We've finished the downstreams of a COW mapping,
+					// so stop marking pages as COW.
+
+					if (rmap == still_cow)
+						still_cow = NULL;
+				}
+			}
+
+			node = node->next;
+		} while (node != &head->tail);
+
+		assert(!still_cow);
+	}
+
+ Lock::Lock rmap_lock;
+}
--- /dev/null
+# orb/: register this directory and its C++ sources with the
+# top-level build lists.
+DIR := orb/
+DIRS += $(DIR)
+
+RAW_CXXFILES := invoke
+CXXFILES += $(addprefix $(DIR),$(RAW_CXXFILES))
--- /dev/null
+// orb/invoke.cc -- Method Invocation
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/types.h>
+#include <kern/orb.h>
+#include <kern/pagealloc.h>
+#include <kern/compiler.h>
+#include <arch/usercopy.h>
+#include <orb.h>
+
+using System::RunTime::ParamInfoBlock;
+using namespace ORB;
+using Threads::Thread;
+
+namespace ORB {
+	// Return the next free CallFrame on the thread's ORB call stack,
+	// chaining in a freshly allocated page of frames when the current
+	// header's frames are exhausted.
+	static CallFrame *new_frame(Thread *thread)
+	{
+		CallStackHeader *hdr = thread->orbstack_top_hdr;
+
+		// Last frame of this page in use?  Allocate another page,
+		// placement-construct its header, and link it in front.
+		if (unlikely(thread->orbstack_top == hdr->num_frames - 1)) {
+			hdr = static_cast<CallStackHeader *>(Mem::alloc_pages(1));
+			new(hdr) CallStackHeader;
+			hdr->thread = thread;
+			// Frames fill the remainder of the page after the header.
+			hdr->num_frames = (Arch::page_size - sizeof(CallStackHeader)) /
+			                  sizeof(CallFrame);
+
+			thread->orbstack_top_hdr->node.add_front(&hdr->node);
+			thread->orbstack_top_hdr = hdr;
+			thread->orbstack_top = 0;
+			return &hdr->frames[0];
+		}
+
+		return &hdr->frames[thread->orbstack_top += 1];
+	}
+}
+
+// Syscall entry point for method invocation: copy in the caller's
+// parameter info block, push a call frame recording the invocation,
+// and (for now) just log it -- dispatch is not implemented yet.
+extern "C" void invoke_method(ulong objid, ulong methid,
+                              ParamInfoBlock *user_pib, ulong ret_pc)
+{
+	// Copy the PIB from userspace; currently only the copy itself is
+	// used (validation side effect) -- the local is otherwise unread.
+	ParamInfoBlock pib = Arch::copyin(user_pib);
+	CallFrame *frame = new_frame(curthread);
+
+	frame->object = objid;
+	frame->method = methid;
+	frame->caller_user_pib = user_pib;
+	frame->ret_pc = ret_pc;
+
+	printf("invoke_method: frame %p object %lx method %lx pib %p ret %lx\n",
+	       frame, frame->object, frame->method, frame->caller_user_pib,
+	       frame->ret_pc);
+
+
+}
--- /dev/null
+# tests/: register the test sources.  Exactly one test may be listed
+# at a time, since each test file defines its own run_test().
+DIR := tests/
+DIRS += $(DIR)
+
+RAW_CXXFILES := aspace
+CXXFILES += $(addprefix $(DIR),$(RAW_CXXFILES))
--- /dev/null
+// tests/aspace.cc
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/types.h>
+#include <kern/libc.h>
+#include <kern/time.h>
+#include <kern/thread.h>
+#include <kern/mem.h>
+
+using namespace Mem;
+namespace Mem {
+ extern IMappable physmem;
+}
+
+// Exercise address-space creation, stacked mappings, copy-on-write,
+// and unmap by mapping a kernel buffer through two stacked aspaces
+// and reading/writing it through several user-visible windows.
+void thread(void *arg)
+{
+	// Yuck -- out param->return value promotion would turn it into
+	// AddrSpace aspace = AddrSpace::downcast(Mem::proc_addr_space_factory.create()),
+	// but that's still ugly.  Perhaps something like:
+	// AddrSpace aspace(Mem::proc_addr_space_factory).
+
+	Object obj;
+	proc_addr_space_factory.create(&obj);
+	IAddrSpace aspace = IAddrSpace::downcast(obj);
+
+	printf("aspace %p created\n", (void *)aspace);
+
+	AddrSpace *kaspace = AddrSpace::classptr(aspace);
+
+	printf("kaspace %p\n", kaspace);
+
+	// Switch the current thread into the new process address space.
+	curthread->set_aspace(kaspace);
+	printf("Addr space set\n");
+
+	// Two extra aspaces to stack mappings through.
+	IAddrSpace stacked[2];
+	IMappable mappables[2];
+
+	for (int i = 0; i < 2; i++) {
+		addr_space_factory.create(&obj);
+		stacked[i] = IAddrSpace::downcast(obj);
+		stacked[i].get_mappable(&mappables[i]);
+	}
+
+	printf("stacked %p %p mappable %p %p\n",
+	       (void *)stacked[0], (void *)stacked[1],
+	       (void *)mappables[0], (void *)mappables[1]);
+
+	// Kernel buffer, aligned up to a page boundary, with two
+	// distinguishable page-sized patterns.
+	char *buf = new char[100000];
+	printf("buf %p\n", buf);
+
+	char *abuf = (char *)((((ulong)buf) + 4095) & ~4095);
+	printf("abuf %p\n", abuf);
+
+	memset(abuf, 'A', Arch::page_size);
+	memset(abuf + Arch::page_size, 'B', Arch::page_size);
+
+	Region region;
+	u64 vstart[6];
+	System::Mem::MapFlags mf;
+	mf.access_IDLNS_Read = 1;
+	mf.access_IDLNS_Write = 1;
+	mf.Fixed = 1;
+
+	// Map all of physical memory twice into stacked[0]: once
+	// normally and once copy-on-write.
+	region.start = 0;
+	region.end = 0xffffffff;
+	vstart[4] = 0xdeadbeef00000000ULL;
+	vstart[5] = 0xdeadb12345678000ULL;
+
+	stacked[0].map(Mem::physmem, region, &vstart[4], mf);
+	mf.CopyOnWrite = 1;
+	stacked[0].map(Mem::physmem, region, &vstart[5], mf);
+	mf.CopyOnWrite = 0;
+	printf("stacked[0] vstart %llx, %llx\n", vstart[4], vstart[5]);
+
+	// Map a window of stacked[0] into stacked[1].
+	region.start = 0xdeadb00000000000ULL;
+	region.end = 0xdeadbfffffffffffULL;
+	vstart[4] = 0x1234500000000000ULL;
+	mf.Fixed = 0;
+//	mf.access_IDLNS_Write = 0;
+
+	stacked[1].map(mappables[0], region, &vstart[4], mf);
+
+	printf("stacked[1] vstart %llx\n", vstart[4]);
+
+	// Map the (doubly-translated) buffer pages into the process
+	// address space at unspecified addresses.
+	region.start = vstart[4] + 0xeef00000000ULL + kvirt_to_phys(abuf);
+	region.end = vstart[4] + 0xeef00000000ULL + kvirt_to_phys(abuf) + Arch::page_size - 1;
+	vstart[0] = System::Mem::AddrSpace_ns::unspecified_start;
+	vstart[1] = System::Mem::AddrSpace_ns::unspecified_start;
+	vstart[2] = System::Mem::AddrSpace_ns::unspecified_start;
+	mf.access_IDLNS_Write = 1;
+
+	aspace.map(mappables[1], region, &vstart[0], mf);
+
+	region.start += Arch::page_size;
+	region.end += Arch::page_size;
+	aspace.map(mappables[1], region, &vstart[1], mf);
+
+	region.start = vstart[4] + 0x12345678000ULL + kvirt_to_phys(abuf);
+	region.end = vstart[4] + 0x12345678000ULL + kvirt_to_phys(abuf) + Arch::page_size - 1;
+
+	aspace.map(mappables[1], region, &vstart[2], mf);
+
+	// NOTE(review): vstart[3] is passed in uninitialized here; since
+	// mf.Fixed is clear the input value is presumably only a hint,
+	// but it should be set to unspecified_start like the others --
+	// confirm against IAddrSpace::map's contract.
+	mf.CopyOnWrite = 1;
+	aspace.map(mappables[1], region, &vstart[3], mf);
+
+	printf("vstart %llx %llx %llx %llx\n", vstart[0], vstart[1], vstart[2], vstart[3]);
+	char *vbuf[4];
+	vbuf[0] = (char *)vstart[0];
+	vbuf[1] = (char *)vstart[1];
+	vbuf[2] = (char *)vstart[2];
+	vbuf[3] = (char *)vstart[3];
+
+	// Expect the A/B patterns through each window.
+	printf("%c%c%c%c ", vbuf[0][0], vbuf[0][1], vbuf[0][2], vbuf[0][3]);
+	printf("%c%c%c%c ", vbuf[1][0], vbuf[1][1], vbuf[1][2], vbuf[1][3]);
+	printf("%c%c%c%c ", vbuf[2][0], vbuf[2][1], vbuf[2][2], vbuf[2][3]);
+	printf("%c%c%c%c\n", vbuf[3][0], vbuf[3][1], vbuf[3][2], vbuf[3][3]);
+
+	// Write through each window; vbuf[3] is COW, so this should
+	// trigger a copy-on-write break.
+	vbuf[0][0] = 'a';
+	vbuf[1][1] = 'b';
+	vbuf[2][2] = 'c';
+	vbuf[3][3] = 'd';
+
+#if 0
+	region.start = 0xdeadbeef00000000ULL + kvirt_to_phys(abuf) + Arch::page_size;
+	region.end = region.start + Arch::page_size - 1;
+	printf("unmapping %llx->%llx\n", region.start, region.end);
+
+	stacked[0].unmap(region);
+#endif
+
+	printf("%c%c%c%c ", vbuf[0][0], vbuf[0][1], vbuf[0][2], vbuf[0][3]);
+	printf("%c%c%c%c ", vbuf[1][0], vbuf[1][1], vbuf[1][2], vbuf[1][3]);
+	printf("%c%c%c%c ", vbuf[2][0], vbuf[2][1], vbuf[2][2], vbuf[2][3]);
+	printf("%c%c%c%c\n", vbuf[3][0], vbuf[3][1], vbuf[3][2], vbuf[3][3]);
+}
+
+// Test entry point: run the exercise in its own kernel thread.
+void run_test()
+{
+	Threads::sched.new_thread(thread, NULL, "thread")->wake();
+}
--- /dev/null
+// tests/mutex.cc
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/types.h>
+#include <kern/libc.h>
+#include <kern/time.h>
+#include <kern/thread.h>
+#include <util/lock.h>
+
+Lock::Lock testlock;
+
+// Second contender: holds testlock while printing 'B' until the
+// monotonic-clock second rolls over, then drops and immediately
+// re-takes the lock so thread A gets a turn.
+void threadb(void *arg)
+{
+	testlock.lock();
+
+	while (true) {
+		Time::Time start;
+		Time::monotonic_clock.get_time(&start);
+
+		for (;;) {
+			printf("B");
+
+			Time::Time cur;
+			Time::monotonic_clock.get_time(&cur);
+
+			if (start.seconds != cur.seconds)
+				break;
+		}
+
+		testlock.unlock();
+		testlock.lock();
+	}
+}
+
+// First contender: starts thread B, then alternates with it on
+// testlock, printing 'A' for one clock-second per turn.
+void threada(void *arg)
+{
+	Threads::sched.new_thread(threadb, NULL, "thread b")->wake();
+
+	testlock.lock();
+
+	while (true) {
+		Time::Time start;
+		Time::monotonic_clock.get_time(&start);
+
+		for (;;) {
+			printf("A");
+
+			Time::Time cur;
+			Time::monotonic_clock.get_time(&cur);
+
+			if (start.seconds != cur.seconds)
+				break;
+		}
+
+		testlock.unlock();
+		testlock.lock();
+	}
+}
+
+
+// Test entry point: start thread A (which starts thread B itself).
+void run_test()
+{
+	Threads::sched.new_thread(threada, NULL, "thread a")->wake();
+}
--- /dev/null
+// tests/threads.cc
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kern/types.h>
+#include <kern/libc.h>
+#include <kern/time.h>
+#include <kern/thread.h>
+
+// Timer callback: wake the Blocker stashed in entry->data.
+void wake(Time::KTimerEntry *entry)
+{
+	Threads::Blocker *blocker = static_cast<Threads::Blocker *>(entry->data);
+	blocker->wake();
+}
+
+// Timer callback: clear the int flag pointed to by entry->data.
+void clearflag(Time::KTimerEntry *entry)
+{
+	*static_cast<int *>(entry->data) = 0;
+}
+
+// Busy-prints 'B' for two seconds (until a timer clears the flag),
+// then blocks for two seconds (until a timer wakes it), forever.
+void threadb(void *arg)
+{
+	Time::KTimerEntry timer(Time::monotonic_timers);
+	Threads::ThreadBlocker tb(curthread);
+
+	while (true) {
+		volatile int flag = 1;
+		Time::Time deadline;
+		Time::monotonic_clock.get_time(&deadline);
+
+		deadline.seconds += 2;
+
+		// Spin-print until the timer fires and clears the flag.
+		timer.func = clearflag;
+		timer.data = (void *)(&flag);
+		timer.arm(deadline);
+
+		while (flag)
+			printf("B");
+
+		deadline.seconds += 2;
+
+		// Then sleep until the timer wakes us.
+		tb.blocked = true;
+		timer.func = wake;
+		timer.data = &tb;
+		timer.arm(deadline);
+
+		curthread->block(&tb);
+	}
+}
+
+extern System::Mem::Mappable Mem::physmem;
+
+// Starts thread B, then busy-prints 'A' for one second and blocks
+// for one second, forever (half thread B's period).
+void threada(void *arg)
+{
+	Time::KTimerEntry timer(Time::monotonic_timers);
+	Threads::ThreadBlocker tb(curthread);
+
+	Threads::sched.new_thread(threadb, NULL, "thread b")->wake();
+
+	while (true) {
+		volatile int flag = 1;
+		Time::Time deadline;
+		Time::monotonic_clock.get_time(&deadline);
+
+		deadline.seconds++;
+
+		// Spin-print until the timer fires and clears the flag.
+		timer.func = clearflag;
+		timer.data = (void *)(&flag);
+		timer.arm(deadline);
+
+		while (flag)
+			printf("A");
+
+		deadline.seconds++;
+
+		// Then sleep until the timer wakes us.
+		tb.blocked = true;
+		timer.func = wake;
+		timer.data = &tb;
+		timer.arm(deadline);
+
+		curthread->block(&tb);
+	}
+}
+
+// Test entry point: start thread A (which starts thread B itself).
+void run_test()
+{
+	Threads::sched.new_thread(threada, NULL, "thread a")->wake();
+}
--- /dev/null
+# Build the shared library tree: pull in the common head/tail
+# makefiles, gather objects from the c++ and c subdirectories, and
+# expose a single "objs" target that builds them all.
+TOP := $(shell dirname `pwd -P`)
+COMP := lib
+include ../Makefile.head
+
+include c++/Makefile
+include c/Makefile
+
+TARGETS := objs
+
+include ../Makefile.tail
+
+objs: $(OBJS)
--- /dev/null
+# c++/: register this directory and its C++ sources.
+DIR := c++/
+DIRS += $(DIR)
+
+RAW_CXXFILES := orb
+CXXFILES += $(addprefix $(DIR),$(RAW_CXXFILES))
--- /dev/null
+#include <System.h>
+
+namespace System {
+	namespace RunTime {
+		// This function could be made to do a binary search if we can
+		// manage to somehow sort the IFaceTables...
+
+		// Adjust "obj" to the interface identified by new_guid,
+		// returning the adjusted pointer as an integer (0 for a NULL
+		// input).  The object's IFaceTable is scanned linearly for a
+		// matching GUID.
+		//
+		// NOTE(review): if no entry matches, the loop exits on the
+		// NULL-guid sentinel and the sentinel's offset is still
+		// applied below -- presumably the sentinel's offset encodes
+		// the "unsupported" result; confirm against the IDL compiler's
+		// table layout.
+		uintptr_t downcast(::System::_i_Object *obj,
+		                   const unsigned long *new_guid)
+		{
+			if (!obj)
+				return 0;
+
+			// Link-time guard: referenced only if the size checks
+			// below fail to cover this platform's long.
+			void unsupported_long_size();
+
+			if (sizeof(long) != 4 && sizeof(long) != 8)
+				unsupported_long_size();
+
+			IFaceTable *tbl = obj->info->concrete_IFaceTable;
+			unsigned long new_guid_first = *new_guid;
+
+			// This doesn't use guids_equal(), as that would eliminate the
+			// ability to cache the first word of new_guid.  The compiler
+			// *might* do it anyway, but this code was written before
+			// guids_equal existed, and I don't want to risk removing an
+			// optimization by changing it now without testing how GCC
+			// behaves.
+
+			do {
+				// GUIDs are 16 bytes: 2 longs on 64-bit, 4 on 32-bit.
+				if (*tbl->guid == new_guid_first &&
+				    tbl->guid[1] == new_guid[1] &&
+				    (sizeof(long) == 8 ||
+				     (tbl->guid[2] == new_guid[2] &&
+				      tbl->guid[3] == new_guid[3])))
+					break;
+
+				tbl++;
+			} while (tbl->guid);
+
+			// Apply the concrete-class offset, then the interface
+			// offset from the matched table entry.
+			uintptr_t ptr = reinterpret_cast<uintptr_t>(obj);
+
+			ptr += obj->info->concrete;
+			ptr += tbl->offset;
+
+			return ptr;
+		};
+
+		// Return the caller's program counter (its return address).
+		unsigned long get_pc()
+		{
+			return reinterpret_cast<unsigned long>(__builtin_return_address(0));
+		}
+	}
+}
--- /dev/null
+# c/: no C sources yet; RAW_CXXFILES is intentionally empty.
+DIR := c/
+DIRS += $(DIR)
+
+RAW_CXXFILES :=
+CXXFILES += $(addprefix $(DIR),$(RAW_CXXFILES))
--- /dev/null
+# NOTE: due to suckage of make that I can't figure out how to work
+# around, you'll have to run make twice after you update a .cdl file
+# in order to make sure that any code that includes the generated
+# headers has been rebuilt.  This is despite having an explicit
+# dependency of the .cc file on the normal .h, of the normal .h on
+# the generated .h, of the generated headers on the server directory,
+# and of the server directory on the CDL file.  The CDL->directory rule
+# is used to generate the headers, but even if it's done specifically
+# for the .cc file up the chain, it doesn't rebuild the .cc file.
+
+include ../../Makefile.target
+TOP := $(CURDIR)
+
+INCS := -Iinclude -I../include/c++ -I../include/generated/c++ \
+        -I../include/c
+
+DEFS += -D_KERNEL
+
+CXXFLAGS += $(INCS) $(DEFS) -Wall -Werror -O3 -g3 -fno-rtti \
+            -fno-exceptions -fno-builtin
+
+ASFLAGS += $(INCS)
+
+.PHONY: symlinks all default rerun dep servers clean distclean
+
+all: default
+
+symlinks:
+	rm -f include/arch
+	ln -sfn arch-$(ARCH) include/arch
+
+# Make sure "make" treats these as the right type of variable
+# when += is used later.
+OBJS :=
+DIRS :=
+
+# Must come first: contains boot entry code
+include arch/$(ARCH)/Makefile
+
+include core/Makefile
+include mem/Makefile
+include io/Makefile
+include lib/Makefile
+
+# Must come last: contains kernel target
+include arch/$(ARCH)/Makefile.final
+
+clean: $(EXTRACLEAN)
+	rm -rf $(foreach dir,$(DIRS),$(dir)*.o $(dir)*-server)
+	rm -f kernel kernel.stripped
+
+distclean: clean
+	rm -f .depend .gdb_history include/arch
+
+# Every .o file which has a corresponding .cdl file will
+# depend on the server stubs.
+
+SERVERS := $(foreach obj,$(OBJS),$(wildcard $(obj:.o=).cdl))
+SERVERS := $(SERVERS:%.cdl=%)
+
+$(SERVERS:%=%-server): %-server: %.cdl $(TOP)/../ifaces
+	rm -rf "$(BUILDDIR)/$@"
+	$(IDLC) -t $(ARCH) -l c++ -r -i "$(TOP)/../ifaces" -o "$(BUILDDIR)/$@" "$<"
+
+$(SERVERS:%=%.cc): %.cc: %-server
+
+servers: symlinks $(SERVERS:%=%-server)
+
+# FILE must be reset on every iteration of the outer loop; otherwise
+# a source with no .cc/.S file would silently reuse the previous
+# iteration's FILE and emit dependencies for the wrong object.
+dep: servers
+	rm -f .depend
+	for i in $(OBJS:.o=); do \
+		FILE=; \
+		for j in cc S; do \
+			if [ -e "$$i".$$j ]; then \
+				FILE="$$i".$$j; \
+			fi; \
+		done; \
+		\
+		if [ "$$FILE" ]; then \
+			$(CXX) $(CXXFLAGS) -M -MT "$$i".o "$$FILE" >> .depend; \
+		fi \
+	done; \
+	for i in $(SERVERS); do \
+		$(IDLC) -M -l c++ -r -i "$(TOP)/../ifaces" -o "$$i-server" "$$i.cdl" >> .depend; \
+	done
+
+# Until the first "make dep" there is no .depend; bootstrap by
+# generating it and re-invoking make.
+ifeq (.depend,$(wildcard .depend))
+include .depend
+default: kernel kernel.stripped
+else
+rerun: dep
+	@$(MAKE)
+
+default: rerun
+endif
--- /dev/null
+# lib/kernel: sub-makefiles are included with $(PRE) as the path
+# prefix so object paths stay relative to the kernel build root.
+PRE := lib/kernel/
+include $(PRE)time/Makefile
--- /dev/null
+The kernel library is linked into both the kernel and userspace. It
+consists primarily of:
+
+1. Code intended for use by userspace to communicate with the kernel
+2. Code which is needed by both userspace and the kernel, but which
+would not be appropriate in another shared tree (such as the lowlevel
+lib or the util includes), due to its reliance on some aspect of the
+kernel's functioning (or for some other reason).
+
--- /dev/null
+# time/: register the kernel-library timer sources.
+DIR := $(PRE)time/
+DIRS += $(DIR)
+
+RAW_CXXFILES := timer
+CXXFILES += $(addprefix $(DIR),$(RAW_CXXFILES))
--- /dev/null
+// lib/kernel/time/timer.cc -- Generic timer multiplexing
+//
+// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal with
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimers.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimers in the
+// documentation and/or other materials provided with the distribution.
+//
+// * The names of the Software's authors and/or contributors
+// may not be used to endorse or promote products derived from
+// this Software without specific prior written permission.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+// SOFTWARE.
+
+#include <kernel/time.h>
+#include <lowlevel/clock.h>
+
+namespace Time {
+ void TimerMux::arm(TimerEntry *entry, Time expiry)
+ {
+ AutoSpinLockRecIRQ autolock(lock);
+
+ bool was_empty = heap.empty();
+ Time oldexpiry;
+
+ if (!was_empty) {
+ oldexpiry = heap.get_top()->expiry;
+ } else {
+ // Shut up the compiler
+ oldexpiry.seconds = 0;
+ oldexpiry.nanos = 0;
+ }
+
+ entry->expiry = expiry;
+
+ if (entry->is_armed())
+ heap.requeue(entry);
+ else
+ heap.add(entry);
+
+ Time newexpiry = heap.get_top()->expiry;
+ if (was_empty || oldexpiry != newexpiry)
+ parent.arm(newexpiry);
+ }
+
+ void TimerMux::disarm(TimerEntry *entry)
+ {
+ AutoSpinLockRecIRQ autolock(lock);
+
+ if (entry->is_armed()) {
+ bool was_top = entry == heap.get_top();
+
+ heap.del(entry);
+
+ if (was_top)
+ parent.arm(heap.get_top()->expiry);
+ }
+ }
+
+ void TimerMux::run()
+ {
+ DroppableAutoSpinLockRecIRQ autolock(lock);
+
+ while (true) {
+ TimerEntry *top = heap.get_top();
+ Time now;
+ clock.get_time(&now);
+
+ if (now < top->expiry)
+ break;
+
+ heap.del(top);
+
+ autolock.unlock();
+ top->action();
+ autolock.lock();
+ }
+
+ if (!heap.empty())
+ parent.arm(heap.get_top()->expiry);
+ }
+
	// FIXME: stub -- the event is currently ignored.  Presumably this
	// should store EVENT so action() can deliver it on expiry; confirm
	// against the System.Time.Timer interface.
	void TimerEntry::set_action(System::Events::Event *event)
	{
		// FIXME
	}
+
	// FIXME: stub -- does nothing yet.  Invoked by TimerMux::run() with
	// the mux lock dropped when this entry's expiry has passed.
	void TimerEntry::action()
	{
		// FIXME
	}
+}
+
+#include _KERNEL_SERVER(time/timer/footer.cc)
--- /dev/null
+class Time.TimerEntry : System.Time.Timer;
--- /dev/null
+#include <stdio.h>
+#include <stdint.h>
+
// Build-time probe: prints "BE", "LE", or "UNKNOWN" (no trailing newline;
// the output is consumed verbatim by the build) according to how the
// compiler allocates bit-fields within a byte.  Bit-field layout is
// implementation-defined, which is exactly what is being probed.
int main(void)
{
	union {
		// The inner struct member is named ("s"): a block-scope
		// anonymous struct member is a GCC/C11 extension, and naming
		// it keeps this probe valid C99 for other bootstrap compilers.
		struct {
			uint8_t a:4;
			uint8_t b:4;
		} s;

		uint8_t c;
	} u;

	u.c = 0xa5;

	if (u.s.a == 0xa && u.s.b == 0x5)
		printf("BE");
	else if (u.s.a == 0x5 && u.s.b == 0xa)
		printf("LE");
	else
		printf("UNKNOWN");

	return 0;
}
--- /dev/null
+#include <stdio.h>
+#include <stdint.h>
+
// Build-time probe: prints "BE", "LE", or "UNKNOWN" (no trailing newline;
// the output is consumed verbatim by the build) according to the host's
// byte order, by aliasing a 32-bit value with a byte array.
int main(void)
{
	// The union object is named ("u"): a block-scope anonymous union
	// object is a GCC extension (C11 only permits anonymous members
	// inside a named struct/union), so naming it keeps this standard C.
	union {
		uint32_t a;
		uint8_t b[4];
	} u;

	u.a = 0x12345678;

	if (u.b[0] == 0x12 && u.b[1] == 0x34 && u.b[2] == 0x56 && u.b[3] == 0x78)
		printf("BE");
	else if (u.b[3] == 0x12 && u.b[2] == 0x34 && u.b[1] == 0x56 && u.b[0] == 0x78)
		printf("LE");
	else
		printf("UNKNOWN");

	return 0;
}