Initial snapshot

This commit is contained in:
Mercier Pierre-Olivier 2013-02-11 22:04:30 +01:00
commit fee4dd4e6d
373 changed files with 62144 additions and 0 deletions

View file

@ -0,0 +1,145 @@
#
# ---------- header -----------------------------------------------------------
#
# project kaneton
#
# license kaneton
#
# file /home/mycure/kaneton/export/data/snapshot/Makefile
#
# created julien quintard [tue jun 26 11:27:22 2007]
# updated julien quintard [sat feb 5 12:11:16 2011]
#
#
# ---------- dependencies -----------------------------------------------------
#
-include environment/env.mk
#
# ---------- directives -------------------------------------------------------
#
.SILENT:
.PHONY: main initialize clean clear prototypes \
headers build install info
#
# ---------- variables --------------------------------------------------------
#
_PYTHON_ ?= $(KANETON_PYTHON)
_MAKE_ ?= $(MAKE)
#
# ---------- default rule -----------------------------------------------------
#
ifeq ($(_SIGNATURE_),kaneton)

# the environment has been generated (env.mk defined _SIGNATURE_): walk
# every component directory and launch its Makefile.
PATHS = $(dir $(_COMPONENTS_))

main:
	for path in $(PATHS) ; do \
	  if [ -f "$${path}/Makefile" ] ; then \
	    $(call env_launch,$${path}/Makefile,,) ; \
	  fi \
	done
else
# the environment has not been generated yet: run 'initialize' first,
# then re-invoke make so that environment/env.mk gets included this time.
main \
clear \
prototypes \
headers \
build install \
info \
clean: initialize
	$(_MAKE_) -f Makefile $@
endif
#
# ---------- environment ------------------------------------------------------
#
# (re)generate the development environment by running the python
# initialization script; env.mk will exist on the next make invocation.
initialize:
	cd environment/ && \
	$(_PYTHON_) initialize.py && \
	cd ..
#
# ---------- conditional ------------------------------------------------------
#
ifeq ($(_SIGNATURE_),kaneton)

#
# ---------- environment ------------------------------------------------------
#

# destroy the generated environment through the dedicated clean script.
clean:
	$(call env_launch,$(_CLEAN_SCRIPT_),,)

#
# ---------- variables --------------------------------------------------------
#

# the directories whose Makefiles the rules below recurse into.
SUBDIRS := boot environment kaneton \
           license sample test tool \
           $(PATHS)

#
# ---------- clear ------------------------------------------------------------
#

# propagate the 'clear' rule to every subdirectory, then purge any
# remaining generated files.
clear:
	for d in $(SUBDIRS) ; do \
	  $(call env_launch,$${d}/Makefile,clear,) ; \
	done
	$(call env_purge,)

#
# ---------- prototypes -------------------------------------------------------
#

# propagate the 'prototypes' rule to every subdirectory.
prototypes:
	for d in $(SUBDIRS) ; do \
	  $(call env_launch,$${d}/Makefile,prototypes,) ; \
	done

#
# ---------- headers ----------------------------------------------------------
#

# propagate the 'headers' rule to every subdirectory.
headers:
	for d in $(SUBDIRS) ; do \
	  $(call env_launch,$${d}/Makefile,headers,) ; \
	done

#
# ---------- boot -------------------------------------------------------------
#

# build the boot media through the MBL - Main Boot Loader - script.
build:
	$(call env_launch,$(_MBL_SCRIPT_),build,)

# build the whole project ('main'), then install it on the boot media.
install: main
	$(call env_launch,$(_MBL_SCRIPT_),install,)

#
# ---------- information ------------------------------------------------------
#

# display general information about the project.
info:
	$(call env_print,,,)
	$(call env_print,"--- ",blue,$(ENV_OPTION_NO_NEWLINE))
	$(call env_print,http://kaneton.opaak.org,,)
	$(call env_print,,,)
endif

View file

@ -0,0 +1,50 @@
#
# ---------- header -----------------------------------------------------------
#
# project kaneton
#
# license kaneton
#
# file /home/mycure/kane.../loader/ibm-pc.ia32/educational/Makefile
#
# created julien quintard [tue jun 12 20:34:41 2007]
# updated julien quintard [sat feb 5 11:09:53 2011]
#
#
# ---------- component --------------------------------------------------------
#
component := loader
#
# ---------- dependencies -----------------------------------------------------
#
include ../../../../environment/env.mk
#
# ---------- directives -------------------------------------------------------
#
.PHONY: main clear prototypes headers
#
# ---------- rules ------------------------------------------------------------
#
# in the default behaviour these rules are deliberate no-op placeholders:
# this loader component currently has nothing to build. NOTE(review):
# 'dependencies' is not listed in .PHONY above — verify this is intended.
ifeq ($(behaviour),default)

main:

prototypes:

headers:

dependencies:

endif
#
# ---------- dependencies -----------------------------------------------------
#
-include ./$(_DEPENDENCY_MK_)

View file

@ -0,0 +1,290 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane...kaneton/kaneton/core/include/scheduler.h
*
* created julien quintard [wed jun 6 13:44:48 2007]
* updated julien quintard [sat feb 5 16:59:18 2011]
*/
#ifndef CORE_SCHEDULER_H
#define CORE_SCHEDULER_H 1
/*
* ---------- dependencies ----------------------------------------------------
*/
#include <core/types.h>
#include <core/error.h>
#include <core/id.h>
#include <machine/machine.h>
/*
* ---------- algorithms ------------------------------------------------------
*/
/*
* the supported scheduler algorithms.
*/
#define SCHEDULER_ALGORITHM_MFQ (1 << 0)
/*
* ---------- macros ----------------------------------------------------------
*/
/*
* the scheduler state either started or stopped.
*/
#define SCHEDULER_STATE_START 1
#define SCHEDULER_STATE_STOP 2
/*
* initial value for the scheduler quantum in milliseconds.
*/
#define SCHEDULER_QUANTUM TIMER_DELAY
/*
* the number of priorities i.e the number of queues.
*/
#define SCHEDULER_NPRIORITIES 60
/*
* timeslice bounds.
*/
#define SCHEDULER_TIMESLICE_HIGH 250
#define SCHEDULER_TIMESLICE_LOW 10
/*
* the timeslice granularity.
*/
#define SCHEDULER_GRANULARITY _scheduler.quantum
/*
* scheduling priorities.
*/
#define SCHEDULER_PRIORITY_HIGH SCHEDULER_NPRIORITIES - 1
#define SCHEDULER_PRIORITY_LOW 0
/*
* ---------- macro functions -------------------------------------------------
*/
/*
* this macro-function computes the thread's high precision character
* by taking into account both the task's and thread's priorities.
*/
#define SCHEDULER_CHARACTER(_id_) \
( \
{ \
o_task* _task_; \
o_thread* _thread_; \
\
assert(thread_get((_id_), &_thread_) == ERROR_OK); \
assert(task_get(_thread_->task, &_task_) == ERROR_OK); \
\
((_task_->priority - TASK_PRIORITY_BACKGROUND_LOW) * \
(_thread_->priority - THREAD_PRIORITY_LOW)); \
} \
)
/*
 * this macro-function computes the priority for a given thread. this
* is a low precision measurement of a thread's priority which is used
* for locating the proper scheduling queue.
*
* indeed, while the character lies in a large range, it is then
* reduced within the range [SCHEDULER_PRIORITY_LOW, SCHEDULER_PRIORITY_HIGH].
*/
#define SCHEDULER_PRIORITY(_thread_) \
( \
{ \
t_priority _character_; \
\
_character_ = SCHEDULER_CHARACTER((_thread_)); \
\
SCHEDULER_PRIORITY_LOW + \
((_character_ * \
(SCHEDULER_PRIORITY_HIGH - SCHEDULER_PRIORITY_LOW)) / \
((TASK_PRIORITY_KERNEL_HIGH - TASK_PRIORITY_BACKGROUND_LOW) * \
(THREAD_PRIORITY_HIGH - THREAD_PRIORITY_LOW))); \
} \
)
/*
* this macro-function takes a number of milliseconds and turns it
* into a valid timeslice according to the scheduler quantum.
*
* for example, with a quantum of 25ms and a given number of 264 milliseconds,
* this macro-function would return 275ms, the upper rounded number.
*/
#define SCHEDULER_SCALE(_timeslice_) \
((((_timeslice_) % SCHEDULER_GRANULARITY) != 0) ? \
(((_timeslice_) + SCHEDULER_GRANULARITY) - \
(_timeslice_) % SCHEDULER_GRANULARITY) \
: (_timeslice_))
/*
* this macro-function computes the timeslice given by the kernel to a
* thread based on its character.
*
* the character basically returns task->priority * thread->priority.
*
* this number is then turned into a timeslice i.e within the timeslice
* range [SCHEDULER_TIMESLICE_LOW, SCHEDULER_TIMESLICE_HIGH].
*
* finally, the timeslice is scaled i.e rounded up in order to fit the
* scheduling unit known as the quantum.
*/
#define SCHEDULER_TIMESLICE(_thread_) \
( \
{ \
t_priority _character_; \
t_timeslice _timeslice_; \
\
_character_ = SCHEDULER_CHARACTER((_thread_)); \
\
_timeslice_ = \
SCHEDULER_TIMESLICE_LOW + \
((_character_ * \
(SCHEDULER_TIMESLICE_HIGH - SCHEDULER_TIMESLICE_LOW)) / \
((TASK_PRIORITY_KERNEL_HIGH - TASK_PRIORITY_BACKGROUND_LOW) * \
(THREAD_PRIORITY_HIGH - THREAD_PRIORITY_LOW))); \
\
SCHEDULER_SCALE(_timeslice_); \
} \
)
/*
* ---------- types -----------------------------------------------------------
*/
/*
 * the scheduler object which manages thread candidates for a
* given CPU specified by _cpu_.
*
* the _thread_ attribute represents the currently scheduled thread
 * which operates at the priority _priority_. this thread still has
* _timeslice_ milliseconds of execution time before a context switch
* occurs.
*
* the _state_ attribute represents the scheduler's current state, either
* started or stopped.
*/
typedef struct
{
  i_cpu			cpu;		/* the CPU this scheduler operates on */
  i_thread		thread;		/* the currently scheduled thread */
  t_timeslice		timeslice;	/* execution time left before a switch */
  t_priority		priority;	/* the current thread's priority */

  /* FIXME[code to complete] */

  t_state		state;		/* either started or stopped */

  machine_data(o_scheduler);
} o_scheduler;
/*
* the scheduler manager's structure which contains the quantum _quantum_
* i.e the smaller unit of execution time, the idle thread's identifier _idle_
* and the sets of schedulers, one scheduler per CPU.
*/
typedef struct
{
  t_quantum		quantum;	/* the smallest unit of execution time */
  i_thread		idle;		/* the idle thread's identifier */
  i_set			schedulers;	/* the set of schedulers, one per CPU */

  machine_data(m_scheduler);
} m_scheduler;
/*
* the scheduler dispatcher.
*/
typedef struct
{
  /* machine-specific implementations of the scheduler operations; each
     pointer mirrors one of the core functions prototyped below. */
  t_error			(*scheduler_show)(i_cpu,
						  mt_margin);
  t_error			(*scheduler_dump)(void);
  t_error			(*scheduler_start)(i_cpu);
  t_error			(*scheduler_stop)(i_cpu);
  t_error			(*scheduler_quantum)(t_quantum);
  t_error			(*scheduler_yield)(void);
  t_error			(*scheduler_elect)(void);
  t_error			(*scheduler_add)(i_thread);
  t_error			(*scheduler_remove)(i_thread);
  t_error			(*scheduler_update)(i_thread);
  t_error			(*scheduler_initialize)(void);
  t_error			(*scheduler_clean)(void);
} d_scheduler;
/*
* ---------- prototypes ------------------------------------------------------
*
* ../../core/scheduler/scheduler-mfq.c
*/
/*
* ../../core/scheduler/scheduler-mfq.c
*/
t_error scheduler_show(i_cpu id,
mt_margin margin);
t_error scheduler_dump(void);
t_error scheduler_start(i_cpu id);
t_error scheduler_stop(i_cpu id);
t_error scheduler_quantum(t_quantum quantum);
t_error scheduler_yield(void);
t_error scheduler_elect(void);
t_error scheduler_add(i_thread id);
t_error scheduler_remove(i_thread id);
t_error scheduler_update(i_thread id);
t_error scheduler_exist(i_cpu id);
t_error scheduler_get(i_cpu id,
o_scheduler** object);
t_error scheduler_current(o_scheduler** scheduler);
t_error scheduler_initialize(void);
t_error scheduler_clean(void);
/*
* eop
*/
#endif

View file

@ -0,0 +1,463 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/Down...n/kaneton/core/scheduler/scheduler-mfq.c
*
* created matthieu bucchianeri [sat jun 3 22:36:59 2006]
* updated julien quintard [mon apr 11 13:23:09 2011]
*/
/*
* ---------- information -----------------------------------------------------
*
* the scheduler manager provides functionalities for managing the
* execution of the tasks and their threads on the possibly multiple CPUs
* of the computer.
*
* note that tasks are not scheduled. indeed, the active entity is the
* thread. therefore, adding a thread to the scheduler makes it a candidate
* for future election.
*
* the core function is the scheduler_elect() function which chooses
* the next thread to execute, though the current thread may be selected
* to continue its execution.
*/
#if (SCHEDULER_ALGORITHM == SCHEDULER_ALGORITHM_MFQ)
/*
* ---------- includes --------------------------------------------------------
*/
#include <kaneton.h>
/*
* include the machine-specific definitions required by the core.
*/
machine_include(scheduler);
/*
* ---------- externs ---------------------------------------------------------
*/
/*
* the kernel manager.
*/
extern m_kernel _kernel;
/*
* the cpu manager.
*/
extern m_cpu _cpu;
/*
* ---------- globals ---------------------------------------------------------
*/
/*
* the scheduler manager.
*/
m_scheduler _scheduler;
/*
* ---------- functions -------------------------------------------------------
*/
/*
 * this function starts the given CPU's scheduler.
 *
 * steps:
 *
 * 1) retrieve the scheduler object.
 * 2) change the scheduler's state.
 * 3) call the machine.
 */

t_error scheduler_start(i_cpu id)
{
  o_scheduler* scheduler;

  /*
   * 1) retrieve the scheduler object associated with the CPU 'id'.
   */

  if (scheduler_get(id, &scheduler) != ERROR_OK)
    CORE_ESCAPE("unable to retrieve the scheduler object");

  /*
   * 2) mark the scheduler as started.
   */

  scheduler->state = SCHEDULER_STATE_START;

  /*
   * 3) give the machine-specific implementation a chance to react.
   */

  if (machine_call(scheduler, start, id) != ERROR_OK)
    CORE_ESCAPE("an error occured in the machine");

  CORE_LEAVE();
}
/*
 * this function stops the given CPU's scheduler.
 *
 * steps:
 *
 * 1) retrieve the scheduler object.
 * 2) change the scheduler's state.
 * 3) call the machine.
 */

t_error scheduler_stop(i_cpu id)
{
  o_scheduler* scheduler;

  /*
   * 1) retrieve the scheduler object associated with the CPU 'id'.
   */

  if (scheduler_get(id, &scheduler) != ERROR_OK)
    CORE_ESCAPE("unable to retrieve the scheduler object");

  /*
   * 2) mark the scheduler as stopped.
   */

  scheduler->state = SCHEDULER_STATE_STOP;

  /*
   * 3) give the machine-specific implementation a chance to react.
   */

  if (machine_call(scheduler, stop, id) != ERROR_OK)
    CORE_ESCAPE("an error occured in the machine");

  CORE_LEAVE();
}
/*
 * this function modifies the quantum i.e the smallest unit of execution
 * time. currently a stub: it accepts the new quantum and reports success.
 */

t_error scheduler_quantum(t_quantum quantum)
{
  /* FIXME[code to complete] */

  CORE_LEAVE();
}
/*
 * this function enables the current thread to voluntarily relinquish its
 * execution, hence permitting another thread to be scheduled immediately
 * on this CPU. currently a stub: it does nothing and reports success.
 */

t_error scheduler_yield(void)
{
  /* FIXME[code to complete] */

  CORE_LEAVE();
}
/*
 * this function elects the future thread to execute, taking care to
 * save the currently executing one, should both the task and thread be
 * still in a running state.
 *
 * note that this function may elect the already running thread if
 * (i) it has not expired and (ii) there is no thread with a higher priority
 * in the active list.
 *
 * finally, this function may detect that the scheduler has been stopped.
 * should this occur, the elected thread is saved and the original kernel
 * thread is specially scheduled, hence returning to its initial state.
 *
 * currently a stub: it does nothing and reports success.
 */

t_error scheduler_elect(void)
{
  /* FIXME[code to complete] */

  CORE_LEAVE();
}
/*
 * this function adds a thread to the scheduler, making it a candidate
 * for future elections. currently a stub: it reports success.
 */

t_error scheduler_add(i_thread id)
{
  /* FIXME[code to complete] */

  CORE_LEAVE();
}
/*
 * this function removes a thread from the scheduler so that it is no
 * longer considered for election. currently a stub: it reports success.
 */

t_error scheduler_remove(i_thread id)
{
  /* FIXME[code to complete] */

  CORE_LEAVE();
}
/*
 * this function updates a thread from the scheduler after its priority
 * has changed.
 *
 * note that the easiest way could seem to be to remove then add the
 * thread, in which case it would be added to its proper queue depending
 * on its new priority. unfortunately this solution would make the thread
 * lose its remaining timeslices. besides, if the thread to remove is the
 * currently scheduled thread, removing it would incur a yield. in this
 * case, the thread would no longer be scheduled and would therefore not
 * have the chance to add itself back to the scheduler.
 *
 * currently a stub: it does nothing and reports success.
 */

t_error scheduler_update(i_thread id)
{
  /* FIXME[code to complete] */

  CORE_LEAVE();
}
/*
 * this function returns true if the scheduler for the given CPU identifier
 * exists, false otherwise.
 */

t_error scheduler_exist(i_cpu id)
{
  /* a scheduler exists iff the set of schedulers contains an element
     identified by 'id'. */
  if (set_exist(_scheduler.schedulers, id) != ERROR_TRUE)
    CORE_FALSE();

  CORE_TRUE();
}
/*
 * this function retrieves the scheduler object for the given CPU.
 *
 * steps:
 *
 * 0) verify the arguments.
 * 1) retrieve the object from the set of schedulers.
 */

t_error scheduler_get(i_cpu id,
                      o_scheduler** object)
{
  /*
   * 0) the caller must provide a location to return the object in.
   */

  if (object == NULL)
    CORE_ESCAPE("the 'object' argument is null");

  /*
   * 1) look the scheduler up in the set, keyed by the CPU identifier.
   */

  if (set_get(_scheduler.schedulers, id, (void**)object) != ERROR_OK)
    CORE_ESCAPE("unable to retrieve the object from the set of schedulers");

  CORE_LEAVE();
}
/*
 * this function returns the scheduler object for the current CPU.
 *
 * steps:
 *
 * 0) verify the arguments.
 * 1) retrieve the current CPU identifier.
 * 2) retrieve the current scheduler object.
 */

t_error scheduler_current(o_scheduler** scheduler)
{
  i_cpu cpu;

  /*
   * 0) the caller must provide a location to return the object in.
   */

  if (scheduler == NULL)
    CORE_ESCAPE("the 'scheduler' argument is null");

  /*
   * 1) ask the CPU manager which CPU is executing this code.
   */

  if (cpu_current(&cpu) != ERROR_OK)
    CORE_ESCAPE("unable to retrieve the current CPU object");

  /*
   * 2) look up this CPU's scheduler in the set of schedulers.
   */

  if (set_get(_scheduler.schedulers, cpu, (void**)scheduler) != ERROR_OK)
    CORE_ESCAPE("unable to retrieve the scheduler from the set");

  CORE_LEAVE();
}
/*
 * this function initializes the scheduler manager.
 *
 * steps:
 *
 * 1) display a message.
 * 2) initialize the manager's structure.
 * 3) initialize the quantum.
 * 4) retrieve the number of CPUs.
 * 5) reserve the set of schedulers.
 * 6) go through the CPUs.
 *   a) retrieve the CPU object.
 *   b) build the scheduler object.
 *   c) add the scheduler to the set of schedulers.
 * 7) retrieve the currently running scheduler.
 * 8) set the scheduler's current thread as being the kernel thread.
 * 9) call the machine.
 */

t_error scheduler_initialize(void)
{
  o_scheduler* scheduler;
  t_setsz ncpus;
  s_iterator it;
  t_state st;
  o_cpu* o;

  /*
   * 1)
   */

  module_call(console, message,
              '+', "initializing the scheduler manager\n");

  /*
   * 2)
   */

  memset(&_scheduler, 0x0, sizeof (m_scheduler));

  /*
   * 3)
   */

  _scheduler.quantum = SCHEDULER_QUANTUM;

  /*
   * 4)
   */

  if (set_size(_cpu.cpus, &ncpus) != ERROR_OK)
    CORE_ESCAPE("unable to retrieve the number of active CPUs");

  /*
   * 5) one o_scheduler slot per CPU.
   */

  if (set_reserve(array,
                  SET_OPTION_ALLOCATE,
                  ncpus,
                  sizeof (o_scheduler),
                  &_scheduler.schedulers) != ERROR_OK)
    CORE_ESCAPE("unable to reserve a set for the schedulers");

  /*
   * 6)
   */

  set_foreach(SET_OPTION_FORWARD, _cpu.cpus, &it, st)
    {
      /* named 'object' rather than 'scheduler' so that it does not
         shadow the outer 'scheduler' pointer declared above. */
      o_scheduler object;

      /*
       * a)
       */

      if (set_object(_cpu.cpus, it, (void**)&o) != ERROR_OK)
        CORE_ESCAPE("unable to retrieve the CPU object");

      /*
       * b) build a stopped scheduler with no current thread and a
       *    timeslice defaulting to the quantum.
       */

      object.cpu = o->id;
      object.thread = ID_UNUSED;
      object.timeslice = _scheduler.quantum;
      object.priority = 0;
      object.state = SCHEDULER_STATE_STOP;

      /*
       * c)
       */

      if (set_append(_scheduler.schedulers, &object) != ERROR_OK)
        CORE_ESCAPE("unable to append the CPU's scheduler to the set");
    }

  /*
   * 7)
   */

  if (scheduler_current(&scheduler) != ERROR_OK)
    CORE_ESCAPE("unable to retrieve the current CPU's scheduler");

  /*
   * 8) the boot CPU is currently executing the kernel thread.
   */

  scheduler->thread = _kernel.thread;

  /*
   * 9)
   */

  if (machine_call(scheduler, initialize) != ERROR_OK)
    CORE_ESCAPE("an error occured in the machine");

  CORE_LEAVE();
}
/*
 * this function just reinitializes the scheduler manager.
 *
 * steps:
 *
 * 1) display a message.
 * 2) call the machine.
 */

t_error scheduler_clean(void)
{
  /*
   * 1)
   */

  module_call(console, message,
              '+', "cleaning the scheduler manager\n");

  /*
   * 2) let the machine-specific implementation release its resources.
   */

  if (machine_call(scheduler, clean) != ERROR_OK)
    CORE_ESCAPE("an error occured in the machine");

  CORE_LEAVE();
}
#endif

View file

@ -0,0 +1,704 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane.../architecture/ia32/educational/context.c
*
* created renaud voltz [tue apr 4 03:08:03 2006]
* updated julien quintard [mon feb 7 15:53:52 2011]
*/
/*
* ---------- information -----------------------------------------------------
*
* this file contains functions related to the IA32 context management.
*
* the ia32/educational implementation makes use of a single TSS - Task
* State Segment which describes the currently executing context.
*
* the context switch mechanism consists in saving the currently executing
* thread's state and loading the future's. note however that the CPU
* performs some saving/restoring automatically.
*
* basically, things go as follows. a task is running, say in CPL3 i.e a guest
* task. when the timer interrupt occurs, for example, the privilege changes
* from CPL3 to CPL0. the CPU, noticing this change in privilege saves some
 * of the thread's context---SS, ESP, EFLAGS, CS, EIP and possibly an error
* code---on a special stack referred to as the thread's pile i.e a stack
* specifically used whenever the privilege changes. note that, should no
* change in privilege occur, the registers would be stored on the thread's
* current stack.
*
* at this point, the processor executes the handler shell (cf handler.c)
* which pre-handles an interrupt depending on its nature: exception, IRQ etc.
* besides, the handler shell calls the ARCHITECTURE_CONTEXT_SAVE()
* macro-function which is at the heart of the context switching mechanism.
*
* once the interrupt has been treated, the ARCHITECTURE_CONTEXT_RESTORE()
* macro-function restores the necessary. finally, the 'iret' instruction
 * is called. the CPU noticing that a privilege change had occurred, restores
* the thread's context by fetching the registers it pushed from the
* thread's pile (or from the thread's stack if the privilege had not changed).
*
* the whole context switch mechanism therefore relies on interrupts. more
* precisely the idea for context switching from a thread A to a thread B
* is to (i) save the thread A's context but this is done naturally when
* A gets interrupted and (ii) change the TSS so that thread B gets referenced,
* hence making the CPU think B got interrupted (instead of A). therefore,
* when returning from the interrupt, the CPU will restore B's context
* rather than A's. this is how A got interrupted, its context saved
* and B's execution got resumed.
*/
/*
* ---------- includes --------------------------------------------------------
*/
#include <kaneton.h>
/*
* ---------- externs ---------------------------------------------------------
*/
/*
* the architecture manager.
*/
extern am _architecture;
/*
* ---------- externs ---------------------------------------------------------
*/
/*
* kernel manager.
*/
extern m_kernel _kernel;
/*
* thread manager.
*/
extern m_thread _thread;
/*
* ---------- functions -------------------------------------------------------
*/
/*
 * this function dumps the given IA32 context to the console, three
 * lines of registers at a time.
 */

t_error architecture_context_dump(as_context context)
{
  /* data segment and general-purpose registers saved by the handler */
  module_call(console, message,
              '#',
              "context: ds(0x%x) edi(0x%x) esi(0x%x) ebp(0x%x) _esp(0x%x)\n",
              context.ds & 0xffff,
              context.edi,
              context.esi,
              context.ebp,
              context._esp);

  /* remaining general-purpose registers and the error code */
  module_call(console, message,
              '#',
              " ebx(0x%x) edx(0x%x) ecx(0x%x) eax(0x%x) error(0x%x)\n",
              context.ebx,
              context.edx,
              context.ecx,
              context.eax,
              context.error);

  /* registers pushed by the CPU itself on interrupt */
  module_call(console, message,
              '#',
              " eip(0x%x) cs(0x%x) eflags(0x%x) esp(0x%x) ss(0x%x)\n",
              context.eip,
              context.cs & 0xffff,
              context.eflags,
              context.esp,
              context.ss & 0xffff);

  MACHINE_LEAVE();
}
/*
* this function builds the given context, initializes its attributes.
*
* steps:
*
* 1) retrieve the thread and task objects.
* 2) depending on the thread's task class.
* A) if this thread is a kernel thread i.e a ring0 thread, this means
* that its context will be saved on its stack. besides, since no change
* in privilege will occur when interrupted, there is no need for a
* pile...
* a) set the pile attributes to zero.
* B) otherwise...
* a) set the thread's pile size.
* b) allocate a pile for the thread i.e a stack which is used by the
* processor to store the context whenever the execution privilege
* changes; for example whenever a guest task running in CP3 is
* interrupted by the timer.
* c) set the pile pointer to the end since IA32 stacks grow towards
* the lower addresses.
* 3) initialize the IA32 context's registers to zero.
* 4) initialize the eflags by activating the first bit (mandatory) but
* also the IF flags to that maskable interrupts get triggered. besides,
* allow driver tasks to perform I/O operations by setting the
* appropriate IOPL.
* 5) set the context's segment selectors according to the task's class.
* 6) complete the thread's initial IA32 context by setting ESP and EIP.
* 7) set the static stack pointer to the end since stacks grow towards
* the lower addresses.
* 8) depending on the thread's task class.
* A) if the thread is a kernel thread, set the initial context's position
* on the stack. indeed, let's recall that ring0 thread's contexts
* are stored on their stack since no change in privilege occurs.
* B) otherwise, set its position on the thread's pile i.e special kernel
* stack.
* 9) finally set the thread's initial IA32 context. note that this step
* is ignored for the kernel thread. indeed, this thread is the one
* setting up the whole kernel. once the scheduler is started, an interrupt
* will be triggered hence interrupting the running thread i.e the kernel
* thread, hence saving its context. since this special thread will always
* start with its context being saved, there is no need to do it now.
* on the contrary, the other threads will begin with their context being
* restored in order to be scheduled for the first time.
*/
t_error architecture_context_build(i_thread id)
{
  o_task* task;
  o_thread* thread;
  as_context ctx;

  /*
   * 1) retrieve the thread and task objects.
   */

  if (thread_get(id, &thread) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the thread object");

  if (task_get(thread->task, &task) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the task object");

  /*
   * 2) set up the pile i.e the ring0 stack, depending on the task's class.
   */

  if (task->class == TASK_CLASS_KERNEL)
    {
      /*
       * A) kernel i.e ring0 threads are interrupted without a privilege
       *    change, so their context is saved on their regular stack and
       *    no pile is needed.
       */

      /*
       * a) zero the pile attributes.
       */

      thread->machine.pile.base = 0x0;
      thread->machine.pile.size = 0x0;
      thread->machine.pile.pointer = 0x0;
    }
  else
    {
      /*
       * B) non-kernel threads need a pile for the CPU to save their
       *    context on whenever the privilege changes.
       */

      /*
       * a) set the thread's pile size.
       */

      thread->machine.pile.size = ARCHITECTURE_HANDLER_PILE_SIZE;

      /*
       * b) allocate the pile in the task's address space.
       */

      if (map_reserve(task->as,
                      MAP_OPTION_NONE,
                      thread->machine.pile.size,
                      PERMISSION_READ | PERMISSION_WRITE,
                      &thread->machine.pile.base) != ERROR_OK)
        MACHINE_ESCAPE("unable to reserve a map for the thread's pile");

      /*
       * c) point at the end: IA32 stacks grow towards lower addresses.
       *    the 16-byte gap below the top is presumably for alignment —
       *    TODO confirm.
       */

      thread->machine.pile.pointer =
        thread->machine.pile.base + thread->machine.pile.size - 16;
    }

  /*
   * 3) start from an all-zero register set.
   */

  memset(&ctx, 0x0, sizeof (as_context));

  /*
   * 4) initialize the eflags: bit 1 is architecturally always set, IF
   *    enables maskable interrupts, and driver tasks additionally get
   *    an IOPL allowing I/O operations.
   */

  ctx.eflags =
    ARCHITECTURE_REGISTER_EFLAGS_01 |
    ARCHITECTURE_REGISTER_EFLAGS_IF;

  if (task->class == TASK_CLASS_DRIVER)
    {
      ctx.eflags |=
        ARCHITECTURE_REGISTER_EFLAGS_IOPL_SET(ARCHITECTURE_PRIVILEGE_DRIVER);
    }

  /*
   * 5) pick the code/data segment selectors matching the task's class.
   */

  switch (task->class)
    {
    case TASK_CLASS_KERNEL:
      {
        ctx.cs = _thread.machine.selectors.kernel.cs;
        ctx.ds = _thread.machine.selectors.kernel.ds;
        ctx.ss = _thread.machine.selectors.kernel.ds;

        break;
      }
    case TASK_CLASS_DRIVER:
      {
        ctx.cs = _thread.machine.selectors.driver.cs;
        ctx.ds = _thread.machine.selectors.driver.ds;
        ctx.ss = _thread.machine.selectors.driver.ds;

        break;
      }
    case TASK_CLASS_SERVICE:
      {
        ctx.cs = _thread.machine.selectors.service.cs;
        ctx.ds = _thread.machine.selectors.service.ds;
        ctx.ss = _thread.machine.selectors.service.ds;

        break;
      }
    case TASK_CLASS_GUEST:
      {
        ctx.cs = _thread.machine.selectors.guest.cs;
        ctx.ds = _thread.machine.selectors.guest.ds;
        ctx.ss = _thread.machine.selectors.guest.ds;

        break;
      }
    }

  /*
   * 6) the thread starts at its entry point with ESP near the top of
   *    its stack (stacks grow downwards).
   */

  ctx.esp = thread->stack.base + thread->stack.size - 16;
  ctx.eip = thread->entry;

  /*
   * 7) record the initial stack pointer on the machine side as well.
   */

  thread->machine.stack.pointer = thread->stack.base + thread->stack.size - 16;

  /*
   * 8) compute where the thread's saved context lives.
   */

  if (task->class == TASK_CLASS_KERNEL)
    {
      /*
       * A) ring0 threads: on the thread's own stack, since no privilege
       *    change occurs when they get interrupted.
       */

      thread->machine.context =
        thread->machine.stack.pointer - sizeof (as_context);
    }
  else
    {
      /*
       * B) other threads: on the pile i.e the special kernel stack.
       */

      thread->machine.context =
        thread->machine.pile.pointer - sizeof (as_context);
    }

  /*
   * 9) write the initial context, except for the kernel thread whose
   *    context will be saved naturally by the first interrupt.
   */

  if (thread->id != _kernel.thread)
    {
      if (architecture_context_set(thread->id, &ctx) != ERROR_OK)
        MACHINE_ESCAPE("unable to set the context");
    }

  MACHINE_LEAVE();
}
/*
 * this function destroys a thread's context.
 *
 * steps:
 *
 * 1) retrieve the thread and task object.
 * 2) if the thread has a pile---i.e is not a kernel thread---release it.
 */

t_error architecture_context_destroy(i_thread id)
{
  o_task* task;
  o_thread* thread;

  /*
   * 1)
   */

  if (thread_get(id, &thread) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the thread object");

  if (task_get(thread->task, &task) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the task object");

  /*
   * 2) kernel threads never had a pile reserved (cf context_build),
   *    so only non-kernel threads have one to release.
   */

  if (task->class != TASK_CLASS_KERNEL)
    {
      if (map_release(task->as,
                      thread->machine.pile.base) != ERROR_OK)
        MACHINE_ESCAPE("unable to release the thread's pile");
    }

  MACHINE_LEAVE();
}
/*
 * this function sets up the context switch mechanism.
 *
 * steps:
 *
 * 1) retrieve the kernel address space object.
 * 2) reserve the KIS - Kernel Interrupt Stack and set its pointer to
 *    the end since stacks grow towards low addresses. note that this
 *    special stack resides within the kernel, as its name indicates, and
 *    is used to handle interrupts.
 * 3) reserve a memory area for the TSS.
 * 4) build the initial TSS.
 * 5) update the TSS in order to represent the currently executing thread.
 *    since the current thread is the kernel thread, which runs in ring0,
 *    no ring0 stack needs to be provided, hence SS0 and ESP0 are ignored.
 * 6) activate the TSS.
 * 7) retrieve the segment selectors associated with the
 *    kernel/driver/service/guest code/data segments. these segment selectors
 *    will be used whenever a thread of the associated task class will
 *    be created.
 */

t_error architecture_context_setup(void)
{
  as_tss* tss;
  o_as* as;

  /*
   * 1) retrieving the object also verifies the kernel address space
   *    exists; 'as' itself is not used further below.
   */

  if (as_get(_kernel.as, &as) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the address space object");

  /*
   * 2)
   */

  _architecture.kernel.kis.size = ARCHITECTURE_HANDLER_KIS_SIZE;

  if (map_reserve(_kernel.as,
                  MAP_OPTION_SYSTEM,
                  _architecture.kernel.kis.size,
                  PERMISSION_READ | PERMISSION_WRITE,
                  &_architecture.kernel.kis.base) != ERROR_OK)
    MACHINE_ESCAPE("unable to reserve the KIS memory area");

  _architecture.kernel.kis.pointer =
    _architecture.kernel.kis.base + (_architecture.kernel.kis.size - 16);

  /*
   * 3)
   */

  if (map_reserve(_kernel.as,
                  MAP_OPTION_SYSTEM,
                  ARCHITECTURE_TSS_SIZE,
                  PERMISSION_READ | PERMISSION_WRITE,
                  &_thread.machine.tss) != ERROR_OK)
    MACHINE_ESCAPE("unable to reserve the TSS memory area");

  /*
   * 4)
   */

  if (architecture_tss_build(_thread.machine.tss, &tss) != ERROR_OK)
    MACHINE_ESCAPE("unable to build the initial TSS");

  /*
   * 5)
   */

  if (architecture_tss_update(tss,
                              ARCHITECTURE_TSS_SS0_NULL,
                              ARCHITECTURE_TSS_ESP0_NULL,
                              ARCHITECTURE_TSS_IO) != ERROR_OK)
    MACHINE_ESCAPE("unable to build the TSS");

  /*
   * 6)
   */

  if (architecture_tss_activate(tss) != ERROR_OK)
    MACHINE_ESCAPE("unable to activate the system's TSS");

  /*
   * 7) one code and one data selector per task class.
   */

  if (architecture_gdt_selector(
        ARCHITECTURE_GDT_INDEX_KERNEL_CODE,
        ARCHITECTURE_PRIVILEGE_KERNEL,
        &_thread.machine.selectors.kernel.cs) != ERROR_OK)
    MACHINE_ESCAPE("unable to build the kernel code segment selector");

  if (architecture_gdt_selector(
        ARCHITECTURE_GDT_INDEX_KERNEL_DATA,
        ARCHITECTURE_PRIVILEGE_KERNEL,
        &_thread.machine.selectors.kernel.ds) != ERROR_OK)
    MACHINE_ESCAPE("unable to build the kernel data segment selector");

  if (architecture_gdt_selector(
        ARCHITECTURE_GDT_INDEX_DRIVER_CODE,
        ARCHITECTURE_PRIVILEGE_DRIVER,
        &_thread.machine.selectors.driver.cs) != ERROR_OK)
    MACHINE_ESCAPE("unable to build the driver code segment selector");

  if (architecture_gdt_selector(
        ARCHITECTURE_GDT_INDEX_DRIVER_DATA,
        ARCHITECTURE_PRIVILEGE_DRIVER,
        &_thread.machine.selectors.driver.ds) != ERROR_OK)
    MACHINE_ESCAPE("unable to build the driver data segment selector");

  if (architecture_gdt_selector(
        ARCHITECTURE_GDT_INDEX_SERVICE_CODE,
        ARCHITECTURE_PRIVILEGE_SERVICE,
        &_thread.machine.selectors.service.cs) != ERROR_OK)
    MACHINE_ESCAPE("unable to build the service code segment selector");

  if (architecture_gdt_selector(
        ARCHITECTURE_GDT_INDEX_SERVICE_DATA,
        ARCHITECTURE_PRIVILEGE_SERVICE,
        &_thread.machine.selectors.service.ds) != ERROR_OK)
    MACHINE_ESCAPE("unable to build the service data segment selector");

  if (architecture_gdt_selector(
        ARCHITECTURE_GDT_INDEX_GUEST_CODE,
        ARCHITECTURE_PRIVILEGE_GUEST,
        &_thread.machine.selectors.guest.cs) != ERROR_OK)
    MACHINE_ESCAPE("unable to build the guest code segment selector");

  if (architecture_gdt_selector(
        ARCHITECTURE_GDT_INDEX_GUEST_DATA,
        ARCHITECTURE_PRIVILEGE_GUEST,
        &_thread.machine.selectors.guest.ds) != ERROR_OK)
    MACHINE_ESCAPE("unable to build the guest data segment selector");

  MACHINE_LEAVE();
}
/*
 * this function switches execution from the 'current' thread to the
 * 'future' thread. currently a stub: it does nothing and reports success.
 */

t_error architecture_context_switch(i_thread current,
                                    i_thread future)
{
  /* FIXME[code to complete] */

  MACHINE_LEAVE();
}
/*
 * this function pushes the given arguments on a thread's stack. this way,
 * the thread will be able to access these values at startup.
 *
 * steps:
 *
 * 1) retrieve the thread and task objects.
 * 2) retrieve the thread's context.
 * 3) update the thread's stack pointer by decreasing it since the
 *    arguments are going to be stored at the top of it.
 * 4) write the arguments to the thread's stack.
 * 5) update the thread's context.
 */

t_error architecture_context_arguments(i_thread id,
                                       void* arguments,
                                       t_vsize size)
{
  s_thread_context context;
  o_thread* thread;
  o_task* task;

  /*
   * 1)
   */

  if (thread_get(id, &thread) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the thread object");

  if (task_get(thread->task, &task) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the task object");

  /*
   * 2)
   */

  if (thread_store(thread->id, &context) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the thread context");

  /*
   * 3) stacks grow downwards, so reserving room means decreasing SP.
   */

  context.sp -= size;

  /*
   * 4) write through the task's address space since the stack is not
   *    necessarily mapped in the kernel's.
   */

  if (as_write(task->as, arguments, size, context.sp) != ERROR_OK)
    MACHINE_ESCAPE("unable to store the arguments on the thread's stack");

  /*
   * 5) commit the modified stack pointer back to the thread.
   */

  if (thread_load(thread->id, context) != ERROR_OK)
    MACHINE_ESCAPE("unable to update the thread context");

  MACHINE_LEAVE();
}
/*
 * this function retrieves the IA32 context of the given thread.
 *
 * note that the interrupted task's context has been stored in its
 * pile, i.e ring0 stack (except for kernel threads). since the pile is
 * only mapped in the task's address space, this function, running in the
 * kernel environment, cannot access it directly. therefore, the as_read()
 * function is used to temporarily map the necessary pages in the kernel
 * address space.
 *
 * steps:
 *
 * 0) verify the arguments.
 * 1) retrieve the thread and task objects.
 * 2) read the thread's context from its pile.
 */

t_error architecture_context_get(i_thread id,
                                 as_context* context)
{
  o_thread* thread;
  o_task* task;

  /*
   * 0)
   */

  if (context == NULL)
    MACHINE_ESCAPE("the 'context' argument is null");

  /*
   * 1)
   */

  if (thread_get(id, &thread) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the thread object");

  if (task_get(thread->task, &task) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the task object");

  /*
   * 2) read from the saved-context location recorded at build time.
   */

  if (as_read(task->as,
              thread->machine.context,
              sizeof (as_context),
              context) != ERROR_OK)
    MACHINE_ESCAPE("unable to read the thread's IA32 context");

  MACHINE_LEAVE();
}
/*
 * this function updates the context of a given thread.
 *
 * note that the interrupted task's context has been stored in its
 * pile, i.e ring0 stack (except for kernel threads). since the pile is
 * only mapped in the task's address space, this function, running in the
 * kernel environment, cannot access it directly. therefore, the as_write()
 * function is used to temporarily map the necessary pages in the kernel
 * address space.
 *
 * steps:
 *
 * 0) verify the arguments.
 * 1) retrieve the thread and task objects.
 * 2) update the thread's context stored in its pile by writing its address
 *    space.
 */

t_error architecture_context_set(i_thread id,
                                 as_context* context)
{
  o_thread* thread;
  o_task* task;

  /*
   * 0)
   */

  if (context == NULL)
    MACHINE_ESCAPE("the 'context' argument is null");

  /*
   * 1)
   */

  if (thread_get(id, &thread) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the thread object");

  if (task_get(thread->task, &task) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the task object");

  /*
   * 2) write to the saved-context location recorded at build time.
   */

  if (as_write(task->as,
               context,
               sizeof (as_context),
               thread->machine.context) != ERROR_OK)
    MACHINE_ESCAPE("unable to write the thread's IA32 context");

  MACHINE_LEAVE();
}

View file

@ -0,0 +1,444 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane...hitecture/ia32/educational/environment.c
*
* created julien quintard [thu jan 13 23:13:50 2011]
* updated julien quintard [tue apr 12 07:40:26 2011]
*/
/*
* ---------- information -----------------------------------------------------
*
* this file contains functions for initializing the environments of a
* task, especially regarding its address space.
*
* note that the kernel is treated separately from the servers i.e drivers,
* services and guests.
*
* [XXX:improvement] in the server initialization, needless to map the kernel
* code and stack. instead the handler shells should be
* mapped i.e the .handler section.
*/
/*
* ---------- includes --------------------------------------------------------
*/
#include <kaneton.h>
/*
* ---------- externs ---------------------------------------------------------
*/
/*
* the init structure.
*/
extern s_init* _init;
/*
* the kernel manager.
*/
extern m_kernel _kernel;
/*
* the thread manager.
*/
extern m_thread _thread;
/*
* the segment manager.
*/
extern m_segment _segment;
/*
* the architecture manager.
*/
extern am _architecture;
/*
* ---------- functions -------------------------------------------------------
*/
/*
* this function initializes the kernel's environment.
*
* steps:
*
* 1) retrieve the address space object.
* 2) set the kernel address space's page directory by importing the
* page directory set up by the boot loader.
* 3) generate the PDBR - Page Directory Base Register, also known as
* the CR3, based on the kernel page directory's physical address and
* some flags.
* 4) set the page directory virtual address as being an identity mapping
* of the physical address. this is how the boot loader set things up.
* 5) set the current page directory as being the kernel's one by updating
* the microprocessor CR3 register.
* 6) update the kernel page directory---which is assumed to have been mapped
* by the boot loader through the identity mapping technique---in order
* to set up the mirroring entry. this entry wastes the last 4MB
* of memory and are used for accessing the kernel page directory and
* tables without mapping anything, hence preventing infinite loops.
* note that the entry references the page directory itself, making the
* page directory act as a page table whenever accessed through the
* mirror page directory entry.
* 7) the last 4MB of virtual memory are not accessible since the mirroring
* page directory entry and the referenced page table's entries---the
* page directory itself---are used for the mirroring mechanism. these
* 4MB are wasted and must therefore not be reservable or the kernel may
* end up overwriting the mirroring entries. a region covering the last
* 4MB of virtual memory is therefore injected.
* note that the region injected references an ID_UNUSED-identified
* segment in order to avoid having to reserve a 4MB segment. this is
* possible because region_inject() does not check if the referenced
* segment identifer is valid.
* 8) this step consists in cleaning the page directory set up by the boot
* loader, now used by the kernel, by unmapping any page which is not
* related to the fundamental regions provided by the boot loader.
* go through all the pre-reserved regions provided by the bootloader plus
* one. this additional iteration is required in order to clean the
* mapped pages from the last region to the end of the virtual address
* space.
* a) compute the page directory and table end indexes for the given
* region. note that for the extra iteration, the end indexes are
* set to their maximum so that every page table entry of every page
* directory entry following the last region is cleaned.
* b) go through the involved page directory entries.
* i) if the page directory entry does not reference a page table or
* is used as the mirroring entry, leave it. otherwise...
* #1) retrieve the page table referenced by the page directory entry.
* note that the boot loader relied on the identity mapping technique
* for its paging set up. identity mapping is therefore used to
* retrieve the page table virtual address.
* #2) go through the page table's involved entries.
* #a) if the page table entry is used, delete the reference as
* this mapping must not be very important since not related to
* the pre-reserved regions provided by the boot loader.
* c) if the treated region is not the extra one, compute the next
* page directory and table start indexes as starting right after
* the end of the region i.e address + size.
* 9) flush the whole TLB, resetting all the address translations.
* 10) register the kernel PDBR as being the PDBR on which to switch whenever
* an interrupt occurs.
*/
t_error			architecture_environment_kernel(i_as	id)
{
  i_region		useless;
  at_cr3		pdbr;
  o_as*			as;
  o_region*		r;

  /*
   * 1) retrieve the address space object.
   */

  if (as_get(id, &as) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the address space object");

  /*
   * 2) import the page directory set up by the boot loader as the
   *    kernel address space's page directory.
   */

  as->machine.pd = _init->machine.pd;

  /*
   * 3) generate the PDBR i.e the CR3 register's content, based on the
   *    page directory's physical address and caching flags.
   */

  if (architecture_paging_pdbr(as->machine.pd,
			       ARCHITECTURE_REGISTER_CR3_PCE |
			       ARCHITECTURE_REGISTER_CR3_PWB,
			       &pdbr) != ERROR_OK)
    MACHINE_ESCAPE("unable to build the CR3 register's content");

  /*
   * 5) make this page directory the current one.
   *
   * NOTE(review): there is no code for step 4 of the header comment;
   * the page directory's virtual address is presumably taken as an
   * identity mapping of its physical address, as the boot loader set
   * things up --- confirm when completing the FIXME below.
   */

  /* FIXME[make the page directory provided by the boot loader the system's
	   current page directory by updating the necessary IA32 hardware
	   structure and possibly storing the value in a globally accessible
	   variable such as a manager] */

  /*
   * 6) set up the mirroring entry so that the page directory also acts
   *    as a page table when accessed through its last entry.
   */

  /* FIXME[create the mirroring entry by adding the page directory's
	   address] */

  /*
   * 7) inject a region covering the last 4MB of virtual memory so that
   *    the mirroring area can never be reserved.
   *
   * NOTE(review): 'r' is handed to region_inject(); should the injection
   * fail, the allocated object appears to leak --- confirm whether
   * region_inject() takes ownership on failure.
   */

  if ((r = malloc(sizeof (o_region))) == NULL)
    MACHINE_ESCAPE("unable to allocate memory for the region object");

  /* the region starts at the virtual address covered by the mirror PDE
     and spans one page table's worth of pages; the segment identifier is
     deliberately invalid since region_inject() does not verify it. */
  r->address = ARCHITECTURE_PAGING_ADDRESS(ARCHITECTURE_PD_MIRROR, 0);
  r->segment = ID_UNUSED;
  r->offset = 0x0;
  r->size = ARCHITECTURE_PT_SIZE * ___kaneton$pagesz;
  r->options = REGION_OPTION_NONE;

  if (region_inject(as->id, r, &useless) != ERROR_OK)
    MACHINE_ESCAPE("unable to inject the mirroring region");

  /*
   * 8) clean the mappings left over by the boot loader which are not
   *    related to the pre-reserved regions. kept below as reference for
   *    the implementation to complete.
   */

  /* FIXME[go through the registered regions and remove the
	   page table entries which do not correspond to these
	   regions. this is necessary because the boot loader
	   mapped an awful lot of pages which must now be cleaned]

  pde.start = 0;
  pte.start = 0;

  for (i = 0; i < (_init->nregions + 1); i++)
    {
      if (i != _init->nregions)
	{
	  pde.end = ARCHITECTURE_PD_INDEX(_init->regions[i].address);
	  pte.end = ARCHITECTURE_PT_INDEX(_init->regions[i].address);
	}
      else
	{
	  pde.end = ARCHITECTURE_PD_SIZE - 1;
	  pte.end = ARCHITECTURE_PT_SIZE;
	}

      for (pde.index = pde.start;
	   pde.index <= pde.end;
	   pde.index++)
	{
	  if ((pde.index != ARCHITECTURE_PD_MIRROR) &&
	      (pd[pde.index] & ARCHITECTURE_PDE_PRESENT))
	    {
	      pt = (at_pt)ARCHITECTURE_PDE_ADDRESS(pd[pde.index]);

	      for (pte.index = (pde.index == pde.start ? pte.start : 0);
		   pte.index < (pde.index == pde.end ?
				pte.end : ARCHITECTURE_PT_SIZE);
		   pte.index++)
		{
		  if (pt[pte.index] & ARCHITECTURE_PTE_PRESENT)
		    {
		      if (architecture_pt_delete(pt, pte.index) != ERROR_OK)
			MACHINE_ESCAPE("unable to delete the page "
				       "table entry");
		    }
		}
	    }
	}

      if (i != _init->nregions)
	{
	  pde.start = ARCHITECTURE_PD_INDEX(_init->regions[i].address +
					    _init->regions[i].size);
	  pte.start = ARCHITECTURE_PT_INDEX(_init->regions[i].address +
					    _init->regions[i].size);
	}
    }
  */

  /*
   * 9) flush the whole TLB so that no stale translation survives.
   */

  if (architecture_tlb_flush() != ERROR_OK)
    MACHINE_ESCAPE("unable to flush the TLB");

  /*
   * 10) register the kernel PDBR as the one to switch to whenever an
   *     interrupt occurs.
   */

  _architecture.kernel.pdbr = pdbr;

  MACHINE_LEAVE();
}
/*
* this function sets up the environment of a server i.e drivers, services
* and guests.
*
* steps:
*
* 1) retrieve the address space object.
* 2) reserve a system segment.
* 3) use this segment for the given address space's page directory.
* 4) map the page directory, initialize it and unmap it.
* 5) locate the segment containing the system's TSS and map it in
* the given address space. note that the TSS is mapped at the same
* virtual address as in the kernel.
* 6) locate the segment containing the system's GDT and map it in the
* given address space, again at the same virtual address as the kernel's.
* 7) locate the segment containing the system's IDT and map it in the
* given address space, again at the same virtual address as the kernel's.
* 8) locate the segment containing the kernel code and map it in the given
* address space. note that the identity mapping technique is used here.
* 9) locate the segment containing the kernel stack and map it in the given
* address space, note that the identity mapping technique is used here.
*/
t_error			architecture_environment_server(i_as	id)
{
  i_segment		segment;
  i_region		region;
  o_as*			as;
  o_region*		r;
  o_segment*		s;

  /*
   * 1) retrieve the address space object.
   */

  if (as_get(id, &as) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the address space object");

  /*
   * 2) reserve a page-sized system segment to hold the server's
   *    page directory.
   */

  if (segment_reserve(as->id,
		      ___kaneton$pagesz,
		      PERMISSION_READ | PERMISSION_WRITE,
		      SEGMENT_OPTION_SYSTEM,
		      &segment) != ERROR_OK)
    MACHINE_ESCAPE("unable to reserve a segment");

  if (segment_get(segment, &s) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the segment object");

  /*
   * 3) use this segment's physical address as the address space's
   *    page directory.
   */

  as->machine.pd = s->address;

  /*
   * 4) set the page directory's entries up.
   */

  /* FIXME[map the server's page directory, initialize it and
	   unmap it] */

  /*
   * 5) locate the kernel region containing the system's TSS and map the
   *    underlying segment in the server's address space, at the same
   *    virtual address as in the kernel.
   *
   * NOTE(review): region_locate() is compared against ERROR_FALSE here,
   * unlike the other calls which test for != ERROR_OK.
   */

  if (region_locate(_kernel.as,
		    _thread.machine.tss,
		    &region) == ERROR_FALSE)
    MACHINE_ESCAPE("unable to locate the region in which the TSS lies");

  if (region_get(_kernel.as, region, &r) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the region object");

  if (region_reserve(as->id,
		     r->segment,
		     0x0,
		     REGION_OPTION_FORCE |
		     REGION_OPTION_NONE,
		     _thread.machine.tss,
		     r->size,
		     &region) != ERROR_OK)
    MACHINE_ESCAPE("unable to reserve the region mapping the TSS");

  /*
   * 6) likewise, map the segment containing the system's GDT at the
   *    kernel's virtual address.
   */

  if (region_locate(_kernel.as,
		    (t_vaddr)_segment.machine.gdt.table,
		    &region) == ERROR_FALSE)
    MACHINE_ESCAPE("unable to locate the region in which the GDT lies");

  if (region_get(_kernel.as, region, &r) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the region object");

  if (region_reserve(as->id,
		     r->segment,
		     0x0,
		     REGION_OPTION_FORCE |
		     REGION_OPTION_NONE,
		     (t_vaddr)_segment.machine.gdt.table,
		     ___kaneton$pagesz,
		     &region) != ERROR_OK)
    MACHINE_ESCAPE("unable to reserve the region mapping the GDT");

  /*
   * 7) map the system's IDT in the server's address space.
   */

  /* FIXME[reserve a region for the system's IDT very much as for
	   the GDT above] */

  /*
   * 8) locate the segment containing the kernel code and map it in the
   *    server's address space through the identity mapping technique.
   *    the commented-out code below is kept as reference for the
   *    improvement mentioned in the file header i.e mapping only the
   *    handler shells instead of the whole kernel code.
   */

  /* XXX
  if (region_reserve(asid,
		     _init->kcode,
		     LINKER_SYMBOL(_handler_begin) - _init->kcode,
		     REGION_OPTION_FORCE | REGION_OPTION_PRIVILEGED,
		     LINKER_SYMBOL(_handler_begin),
		     LINKER_SYMBOL(_handler_end) -
		     LINKER_SYMBOL(_handler_begin),
		     &reg) != ERROR_OK)

  if (region_reserve(asid,
		     _init->kcode,
		     LINKER_SYMBOL(_handler_data_begin) - _init->kcode,
		     REGION_OPTION_FORCE | REGION_OPTION_PRIVILEGED,
		     LINKER_SYMBOL(_handler_data_begin),
		     LINKER_SYMBOL(_handler_data_end) -
		     LINKER_SYMBOL(_handler_data_begin),
		     &reg) != ERROR_OK)
  */

  if (segment_locate(_init->kcode, &segment) == ERROR_FALSE)
    MACHINE_ESCAPE("unable to locate the segment which contains the "
		   "kernel code");

  if (region_reserve(as->id,
		     segment,
		     0x0,
		     REGION_OPTION_FORCE,
		     (t_vaddr)_init->kcode,
		     (t_vsize)_init->kcodesz,
		     &region) != ERROR_OK)
    MACHINE_ESCAPE("unable to reserve the region mapping the kernel code");

  /*
   * 9) same for the kernel stack, again identity-mapped.
   */

  if (segment_locate(_init->kstack,
		     &segment) == ERROR_FALSE)
    MACHINE_ESCAPE("unable to locate the segment which contains the "
		   "kernel stack");

  if (region_reserve(as->id,
		     segment,
		     0x0,
		     REGION_OPTION_FORCE,
		     (t_vaddr)_init->kstack,
		     (t_vsize)_init->kstacksz,
		     &region) != ERROR_OK)
    MACHINE_ESCAPE("unable to reserve the region mapping the kernel stack");

  MACHINE_LEAVE();
}

View file

@ -0,0 +1,24 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane.../architecture/ia32/educational/handler.c
*
* created renaud voltz [thu feb 23 10:49:43 2006]
* updated julien quintard [mon apr 11 13:44:48 2011]
*/
/*
* ---------- includes --------------------------------------------------------
*/
#include <kaneton.h>
/*
* ---------- functions -------------------------------------------------------
*/
/* FIXME[complete if necessary] */

View file

@ -0,0 +1,34 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane...hine/architecture/ia32/educational/idt.c
*
* created renaud voltz [sun feb 12 02:02:19 2006]
* updated julien quintard [mon apr 11 13:44:56 2011]
*/
/*
* ---------- information -----------------------------------------------------
*
* this file provides functionalities for managing the IDT - Interrupt
* Descriptor Table.
*
* for more information regarding the handlers triggered through the IDT,
* please have a look at the handler.c file.
*/
/*
* ---------- includes --------------------------------------------------------
*/
#include <kaneton.h>
/*
* ---------- functions -------------------------------------------------------
*/
/* FIXME[complete if necessary] */

View file

@ -0,0 +1,117 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane...cture/ia32/educational/include/context.h
*
* created renaud voltz [tue apr 4 22:01:00 2006]
* updated julien quintard [mon feb 7 16:19:17 2011]
*/
#ifndef ARCHITECTURE_CONTEXT_H
#define ARCHITECTURE_CONTEXT_H	1

/*
 * ---------- macro functions -------------------------------------------------
 */

/*
 * this macro-function saves the context of the thread which had just
 * been interrupted.
 *
 * note that at this point, the stack in use is the thread's pile i.e ring0
 * stack; except for the kernel threads since there is no change in privilege.
 */

#define ARCHITECTURE_CONTEXT_SAVE()					\
  /* FIXME[code to complete] */

/*
 * this macro-function restores the context of the thread whose PDBR and pile
 * are referenced in _architecture.thread. as such, the whole ia32/educational
 * context switch mechanism relies on the simple fact that changing
 * the _architecture structure and returning from the interrupt will make
 * the thread's context restored and its execution resumed.
 *
 * note that at this point, the environment is composed of the kernel PDBR
 * and the KIS - Kernel Interrupt Stack.
 */

#define ARCHITECTURE_CONTEXT_RESTORE()					\
  /* FIXME[code to complete] */

/*
 * ---------- dependencies ----------------------------------------------------
 */

#include <core/types.h>

/*
 * ---------- types -----------------------------------------------------------
 */

/*
 * this structure represents the IA32 context i.e the registers saved on
 * the stack when a thread gets interrupted.
 *
 * NOTE(review): the field order suggests a DS push, followed by the
 * general-purpose registers in PUSHA memory layout (edi lowest, '_esp'
 * being the stack pointer slot PUSHA stores), then the error code and
 * the CPU's interrupt frame (eip, cs, eflags, esp, ss --- the latter two
 * present on a privilege change). confirm against the
 * ARCHITECTURE_CONTEXT_SAVE() implementation once it is written.
 */

typedef struct
{
  t_reg32		ds;		/* data segment selector */

  t_reg32		edi;		/* general-purpose registers */
  t_reg32		esi;
  t_reg32		ebp;
  t_reg32		_esp;		/* stack pointer slot (see note above) */
  t_reg32		ebx;
  t_reg32		edx;
  t_reg32		ecx;
  t_reg32		eax;

  t_reg32		error;		/* error code slot */

  t_reg32		eip;		/* interrupt return frame */
  t_reg32		cs;
  t_reg32		eflags;
  t_reg32		esp;		/* see note above */
  t_reg32		ss;
} __attribute__ ((packed)) as_context;

/*
 * ---------- prototypes ------------------------------------------------------
 *
 *	../context.c
 */

/*
 * ../context.c
 */

t_error			architecture_context_dump(as_context	context);

t_error			architecture_context_build(i_thread	id);

t_error			architecture_context_destroy(i_thread	id);

t_error			architecture_context_setup(void);

t_error			architecture_context_locate(void);

t_error			architecture_context_switch(i_thread	current,
						    i_thread	future);

t_error			architecture_context_arguments(i_thread	id,
						       void*	arguments,
						       t_vsize	size);

t_error			architecture_context_get(i_thread	id,
						 as_context*	context);

t_error			architecture_context_set(i_thread	id,
						 as_context*	context);

/*
 * eop
 */

#endif

View file

@ -0,0 +1,56 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane...cture/ia32/educational/include/handler.h
*
* created renaud voltz [fri feb 17 16:48:22 2006]
* updated julien quintard [mon apr 11 13:45:51 2011]
*/
#ifndef ARCHITECTURE_HANDLER_H
#define ARCHITECTURE_HANDLER_H	1

/*
 * ---------- macros ----------------------------------------------------------
 */

/*
 * this macro represents the number of handlers the system must set up,
 * for the exceptions, IRQs, IPIs and syscalls.
 *
 * note that the definition is parenthesized so that the macro expands
 * safely within larger expressions e.g multiplications; without the
 * parentheses, '2 * ARCHITECTURE_HANDLER_SIZE' would only multiply the
 * first term of the sum.
 */

#define ARCHITECTURE_HANDLER_SIZE	(ARCHITECTURE_IDT_EXCEPTION_SIZE + \
					 ARCHITECTURE_IDT_IRQ_SIZE +	\
					 ARCHITECTURE_IDT_IPI_SIZE +	\
					 ARCHITECTURE_IDT_SYSCALL_SIZE)

/*
 * this macro defines the size of a thread's pile i.e the stack used whenever
 * a privilege change occurs.
 */

#define ARCHITECTURE_HANDLER_PILE_SIZE	(___kaneton$pagesz)

/*
 * this macro defines the size of the KIS - Kernel Interrupt Stack. this is
 * the stack which is used, within the kernel environment, for treating
 * interrupts. parenthesized for the same reason as above.
 */

#define ARCHITECTURE_HANDLER_KIS_SIZE	(2 * ___kaneton$pagesz)

/*
 * ---------- prototypes ------------------------------------------------------
 *
 *	../handler.c
 */

/*
 * eop
 */

#endif

View file

@ -0,0 +1,121 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane...hitecture/ia32/educational/include/idt.h
*
* created renaud voltz [fri feb 10 16:36:20 2006]
* updated julien quintard [mon apr 11 13:45:20 2011]
*/
#ifndef ARCHITECTURE_IDT_H
#define ARCHITECTURE_IDT_H	1

/*
 * ---------- macros ----------------------------------------------------------
 */

/*
 * these macros define the base entry and the number of entries for the
 * several types of gate: IRQ, exception, IPI or syscall.
 *
 * note that 200 syscalls could be set up but the kernel limits itself
 * to ten which is enough for a microkernel.
 */

#define ARCHITECTURE_IDT_EXCEPTION_BASE		0
#define ARCHITECTURE_IDT_EXCEPTION_SIZE		32

#define ARCHITECTURE_IDT_IRQ_BASE		32
#define ARCHITECTURE_IDT_IRQ_SIZE		16

#define ARCHITECTURE_IDT_IPI_BASE		48
#define ARCHITECTURE_IDT_IPI_SIZE		8

#define ARCHITECTURE_IDT_SYSCALL_BASE		56
#define ARCHITECTURE_IDT_SYSCALL_SIZE		10

/*
 * these macros define some of the exception handler sources.
 *
 * note that every definition is parenthesized so that the macros expand
 * safely within larger expressions; without the parentheses, an
 * expression such as '2 * ARCHITECTURE_IDT_EXCEPTION_PF' would only
 * multiply the base, not the whole sum.
 */

#define ARCHITECTURE_IDT_EXCEPTION_DE					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 0)

#define ARCHITECTURE_IDT_EXCEPTION_DB					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 1)

#define ARCHITECTURE_IDT_EXCEPTION_BP					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 3)

#define ARCHITECTURE_IDT_EXCEPTION_OF					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 4)

#define ARCHITECTURE_IDT_EXCEPTION_BR					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 5)

#define ARCHITECTURE_IDT_EXCEPTION_UD					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 6)

#define ARCHITECTURE_IDT_EXCEPTION_NM					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 7)

#define ARCHITECTURE_IDT_EXCEPTION_DF					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 8)

#define ARCHITECTURE_IDT_EXCEPTION_TS					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 10)

#define ARCHITECTURE_IDT_EXCEPTION_NP					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 11)

#define ARCHITECTURE_IDT_EXCEPTION_SS					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 12)

#define ARCHITECTURE_IDT_EXCEPTION_GP					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 13)

#define ARCHITECTURE_IDT_EXCEPTION_PF					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 14)

#define ARCHITECTURE_IDT_EXCEPTION_MF					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 16)

#define ARCHITECTURE_IDT_EXCEPTION_AC					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 17)

#define ARCHITECTURE_IDT_EXCEPTION_MC					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 18)

#define ARCHITECTURE_IDT_EXCEPTION_XM					\
  (ARCHITECTURE_IDT_EXCEPTION_BASE + 19)

/*
 * these macros define some of the IRQ handler sources; parenthesized for
 * the same reason as above.
 */

#define ARCHITECTURE_IDT_IRQ_PIT					\
  (ARCHITECTURE_IDT_IRQ_BASE + 0)

#define ARCHITECTURE_IDT_IRQ_KEYBOARD					\
  (ARCHITECTURE_IDT_IRQ_BASE + 1)

#define ARCHITECTURE_IDT_IRQ_CASCADE					\
  (ARCHITECTURE_IDT_IRQ_BASE + 2)

#define ARCHITECTURE_IDT_IRQ_COM2					\
  (ARCHITECTURE_IDT_IRQ_BASE + 3)

#define ARCHITECTURE_IDT_IRQ_COM1					\
  (ARCHITECTURE_IDT_IRQ_BASE + 4)

#define ARCHITECTURE_IDT_IRQ_FLOPPY					\
  (ARCHITECTURE_IDT_IRQ_BASE + 6)

#define ARCHITECTURE_IDT_IRQ_SPURIOUS					\
  (ARCHITECTURE_IDT_IRQ_BASE + 7)

#define ARCHITECTURE_IDT_IRQ_RTC					\
  (ARCHITECTURE_IDT_IRQ_BASE + 8)

#define ARCHITECTURE_IDT_IRQ_ATA1					\
  (ARCHITECTURE_IDT_IRQ_BASE + 14)

#define ARCHITECTURE_IDT_IRQ_ATA2					\
  (ARCHITECTURE_IDT_IRQ_BASE + 15)

/*
 * ---------- dependencies ----------------------------------------------------
 */

#include <core/types.h>

/*
 * ---------- prototypes ------------------------------------------------------
 *
 *	../idt.c
 */

/*
 * eop
 */

#endif

View file

@ -0,0 +1,83 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane...ecture/ia32/educational/include/paging.h
*
* created julien quintard [fri feb 11 03:04:40 2005]
* updated julien quintard [mon apr 11 13:48:17 2011]
*/
#ifndef ARCHITECTURE_PAGING_H
#define ARCHITECTURE_PAGING_H	1

/*
 * ---------- macro-functions -------------------------------------------------
 */

/*
 * computes a virtual address according to the given directory and table
 * entries.
 *
 * note that the indexes are cast to t_vaddr _before_ shifting: shifting
 * a plain int left by 22 bits overflows for any directory index >= 512
 * e.g ARCHITECTURE_PD_MIRROR (1023), which is undefined behavior in C.
 */

#define ARCHITECTURE_PAGING_ADDRESS(_pdei_, _ptei_)			\
  ((t_vaddr)(((t_vaddr)(_pdei_) << 22) | ((t_vaddr)(_ptei_) << 12)))

/*
 * ---------- dependencies ----------------------------------------------------
 */

#include <core/types.h>
#include <architecture/register.h>

/*
 * ---------- prototypes ------------------------------------------------------
 *
 *	../paging.c
 */

/*
 * ../paging.c
 */

t_error			architecture_paging_pdbr(t_paddr	pd,
						 t_flags	flags,
						 at_cr3*	pdbr);

t_error			architecture_paging_map(i_as		id,
						i_segment	segment,
						t_paddr		offset,
						t_options	options,
						t_vaddr		address,
						t_vsize		size);

t_error			architecture_paging_unmap(i_as		id,
						  t_vaddr	address,
						  t_vsize	size);

t_error			architecture_paging_read(i_segment	id,
						 t_paddr	offset,
						 void*		buffer,
						 t_psize	size);

t_error			architecture_paging_write(i_segment	id,
						  t_paddr	offset,
						  const void*	buffer,
						  t_psize	size);

t_error			architecture_paging_copy(i_region	dst,
						 t_paddr	to,
						 i_region	src,
						 t_paddr	from,
						 t_psize	size);

/*
 * eop
 */

#endif

View file

@ -0,0 +1,49 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/Down...chitecture/ia32/educational/include/pd.h
*
* created julien quintard [mon jan 10 09:05:19 2011]
* updated julien quintard [mon apr 11 13:20:03 2011]
*/
#ifndef ARCHITECTURE_PD_H
#define ARCHITECTURE_PD_H	1

/*
 * ---------- macros ----------------------------------------------------------
 */

/*
 * this value defines the page directory entry which acts as the mirror
 * entry i.e the entry referencing the page directory itself.
 *
 * 1023 is the last entry of the page directory, so that the mirroring
 * mechanism locks the last 4MB of virtual memory.
 */

#define ARCHITECTURE_PD_MIRROR		1023

/*
 * ---------- dependencies ----------------------------------------------------
 */

#include <architecture/types.h>

/*
 * ---------- prototypes ------------------------------------------------------
 *
 *	../pd.c
 */

/*
 * ../pd.c
 */

/*
 * eop
 */

#endif

View file

@ -0,0 +1,48 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/Down...chitecture/ia32/educational/include/pt.h
*
* created julien quintard [mon jan 10 09:31:37 2011]
* updated julien quintard [mon apr 11 13:21:04 2011]
*/
#ifndef ARCHITECTURE_PT_H
#define ARCHITECTURE_PT_H	1

/*
 * ---------- macros ----------------------------------------------------------
 */

/*
 * defines the number of entries composing a page table.
 */

#define ARCHITECTURE_PT_SIZE		1024

/*
 * ---------- dependencies ----------------------------------------------------
 */

#include <architecture/types.h>

/*
 * ---------- prototypes ------------------------------------------------------
 *
 *	../pt.c
 */

/*
 * ../pt.c
 */

/*
 * eop
 */

#endif

View file

@ -0,0 +1,545 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane...e/architecture/ia32/educational/paging.c
*
* created matthieu bucchianeri [tue dec 20 13:45:05 2005]
* updated julien quintard [tue apr 12 07:34:31 2011]
*/
/*
* ---------- information -----------------------------------------------------
*
* this file provides basic paging functionalities.
*
* note that the ia32/educational implementation makes use of the mirroring
* technique. this technique is used to prevent the kernel from looping
* infinitely whenever trying to map a page table for instance. indeed,
* whenever a page table must be mapped, a virtual memory area is picked.
* this memory area must be mapped by creating a mapping in the page
* directory/table hierarchical structure. in order to do so, the kernel page
* directory and the kernel page table referencing the given area must
* also be mapped. should not this be the case, the referencing page table,
* for example, should be mapped by creating a mapping etc. there is an
* obvious infinite loop occurring here where every page table which has
* to be mapped implies another page table to reference it, hence to be
* mapped as well.
*
* in order to prevent this infinite loop, the kernel relies on the
* mirroring technique. this mechanism consists in setting up a kernel
* page directory entry as acting as a special loopback. for example, the
* last page directory entry, i.e pd[1023], will not reference another page
* table as it is the case for the other page directory entries but the
* page directory itself. this way, the page directory, whenever the
* mirroring page directory entry is used, is considered by the microprocessor
* as page table and its entries as page tables entries.
*
* the following schema depicts this organization:
*
* pd @ 0x011e2000
* pde 0 references 0x011e3000
* pt @ 0x011e3000
* pte 0 not present
* pte 1 references 0x00001000
* pte 2 references 0x00002000
* pte 3 references 0x00003000
* pte 4 references 0x00004000
* ...
* pde 2 not present
* pde 3 not present
* pde 4 references 0x011e4000
* pt @ 0x011e4000
* pte 0 references 0x01000000
* pte 1 references 0x01001000
* pte 2 references 0x01002000
* ...
* pde 1023 references 0x011e2000 (the page directory itself)
* pt @ 0x011e2000 (the page directory acts as a page table)
* pte 0 references 0x011e3000 (the first page table: pde 0)
* pte 4 references 0x011e4000 (the second page table: pde 1)
* pte 1023 references 0x011e2000 (the page directory: pde 1023)
*
* this technique---assuming the kernel page directory is mapped, but this is
* quite obvious---enables the kernel to modify any page directory/table
* without mapping any intermediate page table. indeed, the kernel only needs
* to reference the page table it wishes to modify through its mirrored
* page directory entry.
*
* this technique implies that the last page directory entry is reserved
* for the mirroring mechanism. therefore, the last 4MB of virtual memory
* must be locked for that purpose meaning that 4MB of memory are wasted.
*
* finally, the reader should have understood that the mirroring mechanism
* only applies to the kernel tables. indeed, other address spaces'
* page directories and tables can be mapped normally since, should
* a kernel table need to be mapped to access another non-kernel table, it
* will be through the mirroring mechanism.
*/
/*
* ---------- includes --------------------------------------------------------
*/
#include <kaneton.h>
/*
* ---------- externs ---------------------------------------------------------
*/
/*
* the kernel manager.
*/
extern m_kernel _kernel;
/*
* ---------- functions -------------------------------------------------------
*/
/*
* this function generates the CR3 register's content so that to be
* overwritten, hence referencing another page directory structure.
*
* note that the CR3 register is also referred to as the PDBR - Page Directory
* Base Register as it contains the address of the page directory in use.
*
* steps:
*
* 0) verify the arguments.
* 1) generate the CR3 register's content.
*/
t_error			architecture_paging_pdbr(t_paddr	pd,
						 t_flags	flags,
						 at_cr3*	pdbr)
{
  /*
   * 0) make sure the output pointer is valid.
   */

  if (pdbr == NULL)
    MACHINE_ESCAPE("the 'pdbr' argument is null");

  /*
   * 1) keep the page-aligned bits of the page directory's physical
   *    address and merge the control flags into the low bits, forming
   *    the CR3 register's content.
   */

  *pdbr = (pd & 0xfffff000) | flags;

  MACHINE_LEAVE();
}
/*
* this function maps a portion of the given segment to the given virtual
* address.
*/
t_error			architecture_paging_map(i_as		id,
						i_segment	segment,
						t_paddr		offset,
						t_options	options,
						t_vaddr		address,
						t_vsize		size)
{
  /*
   * map 'size' bytes of the given segment, starting at 'offset', at the
   * virtual 'address' within the address space 'id'.
   *
   * not implemented yet: the educational skeleton currently reports
   * success unconditionally.
   */

  /* FIXME[code to complete] */

  MACHINE_LEAVE();
}
/*
* this function unmaps the mappings associated with the given address and
* size.
*/
t_error			architecture_paging_unmap(i_as		id,
						  t_vaddr	address,
						  t_vsize	size)
{
  /*
   * remove the mappings covering ['address', 'address' + 'size') within
   * the address space 'id'.
   *
   * not implemented yet: the educational skeleton currently reports
   * success unconditionally.
   */

  /* FIXME[code to complete] */

  MACHINE_LEAVE();
}
/*
* this function reads data from a given segment by temporarily mapping
* the necessary pages.
*
* note that this function supports non-aligned addresses and sizes.
*
* steps:
*
* 0) verify the arguments.
* 1) retrieve the segment object.
* 2) compute the aligned segment offset along with the non-aligned shift.
* 3) compute the last aligned page boundary.
* 4) reserve a region for the involved pages.
* 5) retrieve the region object.
* 6) copy data from the mapped region into the given buffer.
* 7) release the region.
*/
t_error			architecture_paging_read(i_segment	id,
						 t_paddr	offset,
						 void*		buffer,
						 t_psize	size)
{
  i_region		identifier;
  o_segment*		segment;
  o_region*		region;
  t_paddr		delta;
  t_paddr		boundary;
  t_paddr		aligned;

  /*
   * 0) reject a null buffer.
   */

  if (buffer == NULL)
    MACHINE_ESCAPE("the 'buffer' argument is null");

  /*
   * 1) make sure the segment exists.
   */

  if (segment_get(id, &segment) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the segment object");

  /*
   * 2) align the offset on a page boundary, remembering the intra-page
   *    shift so that the copy can be adjusted later.
   */

  if (offset % ___kaneton$pagesz)
    {
      aligned = offset & ~(___kaneton$pagesz - 1);

      delta = offset - aligned;
      offset = aligned;
    }
  else
    {
      delta = 0;
    }

  /*
   * 3) round the end of the area up to the next page boundary.
   */

  boundary = offset + delta + size;

  if (boundary % ___kaneton$pagesz)
    boundary = (boundary & ~(___kaneton$pagesz - 1)) + ___kaneton$pagesz;

  /*
   * 4) temporarily map the involved pages in the kernel address space.
   */

  if (region_reserve(_kernel.as,
		     id,
		     offset,
		     REGION_OPTION_NONE,
		     0x0,
		     boundary - offset,
		     &identifier) != ERROR_OK)
    MACHINE_ESCAPE("unable to reserve a region");

  /*
   * 5) retrieve the freshly reserved region's object.
   */

  if (region_get(_kernel.as, identifier, &region) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the region object");

  /*
   * 6) copy the requested bytes out of the temporary mapping.
   */

  memcpy(buffer, (void*)region->address + delta, size);

  /*
   * 7) tear the temporary mapping down.
   */

  if (region_release(_kernel.as, identifier) != ERROR_OK)
    MACHINE_ESCAPE("unable to release the region");

  MACHINE_LEAVE();
}
/*
* this function writes data to a given segment by temporarily mapping
* the necessary pages.
*
* note that this function supports non-aligned addresses and sizes.
*
* steps:
*
* 0) verify the arguments.
* 1) retrieve the segment object.
* 2) compute the aligned segment offset along with the non-aligned shift.
* 3) compute the last aligned page boundary.
* 4) reserve a region for the involved pages.
* 5) retrieve the region object.
* 6) copy data from the given buffer to the mapped region.
* 7) release the region.
*/
t_error			architecture_paging_write(i_segment	id,
						  t_paddr	offset,
						  const void*	buffer,
						  t_psize	size)
{
  i_region		identifier;
  o_segment*		segment;
  o_region*		region;
  t_paddr		delta;
  t_paddr		boundary;
  t_paddr		aligned;

  /*
   * 0) reject a null buffer.
   */

  if (buffer == NULL)
    MACHINE_ESCAPE("the 'buffer' argument is null");

  /*
   * 1) make sure the segment exists.
   */

  if (segment_get(id, &segment) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the segment object");

  /*
   * 2) align the offset on a page boundary, remembering the intra-page
   *    shift so that the copy can be adjusted later.
   */

  if (offset % ___kaneton$pagesz)
    {
      aligned = offset & ~(___kaneton$pagesz - 1);

      delta = offset - aligned;
      offset = aligned;
    }
  else
    {
      delta = 0;
    }

  /*
   * 3) round the end of the area up to the next page boundary.
   */

  boundary = offset + delta + size;

  if (boundary % ___kaneton$pagesz)
    boundary = (boundary & ~(___kaneton$pagesz - 1)) + ___kaneton$pagesz;

  /*
   * 4) temporarily map the involved pages in the kernel address space.
   */

  if (region_reserve(_kernel.as,
		     id,
		     offset,
		     REGION_OPTION_NONE,
		     0x0,
		     boundary - offset,
		     &identifier) != ERROR_OK)
    MACHINE_ESCAPE("unable to reserve a region");

  /*
   * 5) retrieve the freshly reserved region's object.
   */

  if (region_get(_kernel.as, identifier, &region) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the region object");

  /*
   * 6) copy the given bytes into the temporary mapping.
   */

  memcpy((void*)region->address + delta, buffer, size);

  /*
   * 7) tear the temporary mapping down.
   */

  if (region_release(_kernel.as, identifier) != ERROR_OK)
    MACHINE_ESCAPE("unable to release the region");

  MACHINE_LEAVE();
}
/*
* this function copies data from a segment to another by temporarily
* mapping the necessary pages.
*
* note that this function supports non-aligned addresses and sizes.
*
* steps:
*
* 1) retrieve the segment objects.
* 2) compute the source aligned offset and non-aligned shift.
* 3) compute the source last aligned page boundary.
* 4) map the source pages through a region reservation.
* 5) retrieve the reserved region.
* 6) compute the destination aligned offset and non-aligned shift.
* 7) compute the destination last aligned page boundary.
* 8) map the destination pages through a region reservation.
* 9) retrieve the reserved region.
* 10) copy from the source mapped region to the destination mapped region.
* 11) release the reserved regions.
*/
t_error			architecture_paging_copy(i_region	dst,
						 t_paddr	to,
						 i_region	src,
						 t_paddr	from,
						 t_psize	size)
{
  o_segment*		source_segment;
  o_segment*		destination_segment;
  i_region		source_region;
  i_region		destination_region;
  o_region*		source_object;
  o_region*		destination_object;
  t_paddr		source_shift;
  t_paddr		destination_shift;
  t_paddr		end;

  /*
   * 1) retrieve both segment objects, making sure they exist.
   *
   * NOTE(review): 'dst' and 'src' are typed i_region but passed to
   * segment_get()/region_reserve() as segment identifiers; this mirrors
   * the original code --- confirm the intended parameter types.
   */

  if (segment_get(dst, &destination_segment) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the segment object");

  if (segment_get(src, &source_segment) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the segment object");

  /*
   * 2) align the source offset on a page boundary, keeping the
   *    intra-page shift.
   */

  if (from % ___kaneton$pagesz)
    {
      source_shift = from - (from & ~(___kaneton$pagesz - 1));
      from = from & ~(___kaneton$pagesz - 1);
    }
  else
    {
      source_shift = 0;
    }

  /*
   * 3) round the source area's end up to the next page boundary.
   */

  end = from + source_shift + size;

  if (end % ___kaneton$pagesz)
    end = (end & ~(___kaneton$pagesz - 1)) + ___kaneton$pagesz;

  /*
   * 4) temporarily map the source pages through a region reservation.
   */

  if (region_reserve(_kernel.as,
		     src,
		     from,
		     REGION_OPTION_NONE,
		     0x0,
		     end - from,
		     &source_region) != ERROR_OK)
    MACHINE_ESCAPE("unable to reserve a region");

  /*
   * 5) retrieve the source region's object; on failure, release the
   *    mapping so that it does not leak.
   */

  if (region_get(_kernel.as,
		 source_region,
		 &source_object) != ERROR_OK)
    {
      region_release(_kernel.as, source_region);

      MACHINE_ESCAPE("unable to retrieve the region object");
    }

  /*
   * 6) align the destination offset on a page boundary, keeping the
   *    intra-page shift.
   */

  if (to % ___kaneton$pagesz)
    {
      destination_shift = to - (to & ~(___kaneton$pagesz - 1));
      to = to & ~(___kaneton$pagesz - 1);
    }
  else
    {
      destination_shift = 0;
    }

  /*
   * 7) round the destination area's end up to the next page boundary.
   */

  end = to + destination_shift + size;

  if (end % ___kaneton$pagesz)
    end = (end & ~(___kaneton$pagesz - 1)) + ___kaneton$pagesz;

  /*
   * 8) temporarily map the destination pages; on failure, release the
   *    source mapping reserved at step 4 --- the original code leaked
   *    it on this path.
   */

  if (region_reserve(_kernel.as,
		     dst,
		     to,
		     REGION_OPTION_NONE,
		     0x0,
		     end - to,
		     &destination_region) != ERROR_OK)
    {
      region_release(_kernel.as, source_region);

      MACHINE_ESCAPE("unable to reserve a region");
    }

  /*
   * 9) retrieve the destination region's object; on failure, release
   *    both temporary mappings.
   */

  if (region_get(_kernel.as,
		 destination_region,
		 &destination_object) != ERROR_OK)
    {
      region_release(_kernel.as, destination_region);
      region_release(_kernel.as, source_region);

      MACHINE_ESCAPE("unable to retrieve the region object");
    }

  /*
   * 10) copy from the source mapping to the destination mapping, taking
   *     the intra-page shifts into account.
   */

  memcpy((void*)destination_object->address + destination_shift,
	 (void*)source_object->address + source_shift,
	 size);

  /*
   * 11) release both temporary mappings; should the source's release
   *     fail, still attempt to release the destination's before
   *     escaping.
   */

  if (region_release(_kernel.as, source_region) != ERROR_OK)
    {
      region_release(_kernel.as, destination_region);

      MACHINE_ESCAPE("unable to release the region");
    }

  if (region_release(_kernel.as, destination_region) != ERROR_OK)
    MACHINE_ESCAPE("unable to release the region");

  MACHINE_LEAVE();
}

View file

@ -0,0 +1,30 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane...chine/architecture/ia32/educational/pd.c
*
* created matthieu bucchianeri [tue dec 20 19:56:20 2005]
* updated julien quintard [mon apr 11 13:45:04 2011]
*/
/*
* ---------- information -----------------------------------------------------
*
* this file contains functions for managing PDs - Page Directories.
*/
/*
* ---------- includes --------------------------------------------------------
*/
#include <kaneton.h>
/*
* ---------- functions -------------------------------------------------------
*/
/* FIXME[complete if necessary] */

View file

@ -0,0 +1,32 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane...chine/architecture/ia32/educational/pt.c
*
* created matthieu bucchianeri [tue dec 20 19:56:48 2005]
* updated julien quintard [mon apr 11 13:45:11 2011]
*/
/*
* ---------- information -----------------------------------------------------
*
* this file contains functions for manipulating PTs - Page Tables.
*
* note that the whole file is extremely similar to pd.c.
*/
/*
* ---------- includes --------------------------------------------------------
*/
#include <kaneton.h>
/*
* ---------- functions -------------------------------------------------------
*/
/* FIXME[complete if necessary] */

View file

@ -0,0 +1,88 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane...achine/glue/ibm-pc.ia32/educational/as.c
*
* created matthieu bucchianeri [sat jun 16 18:10:38 2007]
* updated julien quintard [sat feb 5 13:57:19 2011]
*/
/*
* ---------- information -----------------------------------------------------
*
* this file implements the glue for the address space manager.
*
* the glue particularly sets up the initial address space's state by
* building the IA32 page directory. note that glue_as_reserve()
* detects the kernel address space as this one must be prepared specially.
*/
/*
* ---------- includes --------------------------------------------------------
*/
#include <kaneton.h>
/*
* ---------- externs ---------------------------------------------------------
*/
/*
* the kernel manager.
*/
extern m_kernel _kernel;
/*
* ---------- globals ---------------------------------------------------------
*/
/*
 * the address space dispatcher.
 *
 * this structure binds the machine-specific implementations to the core
 * address space manager's operations; a NULL slot means the core's
 * behaviour is not overridden.
 *
 * only glue_as_reserve() is glued here, so that the hardware paging
 * environment is set up whenever an address space is reserved.
 *
 * NOTE(review): the slot ordering must match the 'd_as' structure declared
 * by the core - confirm glue_as_reserve occupies the 'as_reserve' slot.
 */

d_as			glue_as_dispatch =
  {
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    glue_as_reserve,
    NULL,
    NULL,
    NULL
  };
/*
* ---------- functions -------------------------------------------------------
*/
/*
 * this function initializes the given address space according to the
 * task it belongs to: the kernel's address space receives the kernel
 * environment while any other - i.e a driver, service or guest server -
 * receives the server environment.
 */

t_error			glue_as_reserve(i_task		task,
					i_as*		as)
{
  /* non-kernel address spaces are handled first through a guard clause. */
  if (*as != _kernel.as)
    {
      if (architecture_environment_server(*as) != ERROR_OK)
	MACHINE_ESCAPE("unable to initialize the server's address space");

      MACHINE_LEAVE();
    }

  /* otherwise, prepare the kernel's very own address space. */
  if (architecture_environment_kernel(*as) != ERROR_OK)
    MACHINE_ESCAPE("unable to initialize the kernel's address space");

  MACHINE_LEAVE();
}

View file

@ -0,0 +1,45 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane...ine/glue/ibm-pc.ia32/educational/event.c
*
* created renaud voltz [mon feb 13 01:05:52 2006]
* updated julien quintard [sat feb 5 12:30:16 2011]
*/
/*
* ---------- information -----------------------------------------------------
*
* this file implements the event manager's glue.
*/
/*
* ---------- includes --------------------------------------------------------
*/
#include <kaneton.h>
/*
* ---------- globals ---------------------------------------------------------
*/
/*
 * the event dispatcher.
 *
 * every slot is NULL: this glue does not override any of the core event
 * manager's operations.
 */

d_event			glue_event_dispatch =
  {
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL
  };

View file

@ -0,0 +1,90 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane...lue/ibm-pc.ia32/educational/include/as.h
*
* created julien quintard [sun jun 3 23:54:56 2007]
* updated julien quintard [sat feb 5 15:44:46 2011]
*/
#ifndef GLUE_AS_H
#define GLUE_AS_H		1

/*
 * ---------- macro functions -------------------------------------------------
 */

/*
 * this macro-function declares the address space dispatcher defined
 * in ../as.c.
 */

#define machine_include_as()						\
  extern d_as			glue_as_dispatch

/*
 * this macro-function dispatches the address space calls to the machine
 * implementation: if the 'as_<function>' slot of the dispatcher is NULL
 * the call is skipped and ERROR_OK is returned.
 *
 * note that the construct relies on the GCC statement-expression
 * extension '({ ... })' in order to yield a value.
 */

#define machine_call_as(_function_, _args_...)				\
  (									\
    {									\
      t_error		_r_ = ERROR_OK;					\
									\
      if (glue_as_dispatch.as_ ## _function_ != NULL)			\
        _r_ = glue_as_dispatch.as_ ## _function_(_args_);		\
									\
      _r_;								\
    }									\
  )

/*
 * this macro-function includes data in the 'm_as' type; currently empty.
 */

#define machine_data_m_as()

/*
 * this macro-function includes data in the address space object 'o_as':
 * the address space IA32-specific PD - Page directory's location.
 */

#define machine_data_o_as()						\
  struct								\
  {									\
    t_paddr		pd;						\
  }			machine;

/*
 * ---------- dependencies ----------------------------------------------------
 */

#include <modules/modules.h>

/*
 * ---------- prototypes ------------------------------------------------------
 *
 * ../as.c
 */

/*
 * ../as.c
 *
 * NOTE(review): glue_as_show() and glue_as_dump() are prototyped here but
 * the dispatcher in ../as.c only references glue_as_reserve() - confirm
 * the former two are actually defined somewhere.
 */

t_error			glue_as_show(i_as			id,
				     mt_margin			margin);

t_error			glue_as_dump(void);

t_error			glue_as_reserve(i_task			task,
					i_as*			as);

/*
 * eop
 */

#endif

View file

@ -0,0 +1,83 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane.../ibm-pc.ia32/educational/include/event.h
*
* created julien quintard [wed jun 6 16:15:26 2007]
* updated julien quintard [sat feb 5 13:30:56 2011]
*/
#ifndef GLUE_EVENT_H
#define GLUE_EVENT_H		1

/*
 * ---------- macro functions -------------------------------------------------
 */

/*
 * this macro-function declares the event dispatcher defined in ../event.c.
 */

#define machine_include_event()						\
  extern d_event		glue_event_dispatch

/*
 * this macro-function dispatches the event calls to the machine
 * implementation: if the 'event_<function>' slot of the dispatcher is
 * NULL the call is skipped and ERROR_OK is returned.
 *
 * note that the construct relies on the GCC statement-expression
 * extension '({ ... })' in order to yield a value.
 */

#define machine_call_event(_function_, _args_...)			\
  (									\
    {									\
      t_error		_r_ = ERROR_OK;					\
									\
      if (glue_event_dispatch.event_ ## _function_ != NULL)		\
        _r_ = glue_event_dispatch.event_ ## _function_(_args_);		\
									\
      _r_;								\
    }									\
  )

/*
 * this macro-function includes data in 'm_event'; currently empty.
 */

#define machine_data_m_event()

/*
 * this macro-function includes data in 'o_event'; currently empty.
 */

#define machine_data_o_event()

/*
 * this macro-function includes data in 'o_event_message'; currently empty.
 */

#define machine_data_o_event_message()

/*
 * ---------- dependencies ----------------------------------------------------
 */

#include <core/event.h>

/*
 * ---------- prototypes ------------------------------------------------------
 *
 * ../event.c
 */

/*
 * ../event.c
 */

/*
 * eop
 */

#endif

View file

@ -0,0 +1,49 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane...ne/glue/ibm-pc.ia32/educational/region.c
*
* created julien quintard [wed dec 14 07:06:44 2005]
* updated julien quintard [sat feb 5 14:37:43 2011]
*/
/*
* ---------- information -----------------------------------------------------
*
* this file implements the region manager's glue which basically consists
* in updating the address space's page directory, tables etc. in order
* to reflect the core operation.
*/
/*
* ---------- includes --------------------------------------------------------
*/
#include <kaneton.h>
/*
* ---------- globals ---------------------------------------------------------
*/
/*
 * the region dispatcher.
 *
 * every slot is NULL: this glue does not override any of the core region
 * manager's operations.
 */

d_region		glue_region_dispatch =
  {
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL
  };

View file

@ -0,0 +1,219 @@
/*
* ---------- header ----------------------------------------------------------
*
* project kaneton
*
* license kaneton
*
* file /home/mycure/kane...glue/ibm-pc.ia32/educational/scheduler.c
*
* created matthieu bucchianeri [sat jun 3 22:45:19 2006]
* updated julien quintard [tue feb 15 21:03:52 2011]
*/
/*
* ---------- information -----------------------------------------------------
*
* this file implements the scheduler manager's glue.
*/
/*
* ---------- includes --------------------------------------------------------
*/
#include <kaneton.h>
/*
* ---------- externs ---------------------------------------------------------
*/
/*
* the scheduler manager.
*/
extern m_scheduler _scheduler;
/*
* ---------- globals ---------------------------------------------------------
*/
/*
 * the scheduler dispatcher.
 *
 * the glue provides the dump, start, stop and quantum operations while
 * the NULL slots fall back to the core scheduler manager's behaviour.
 *
 * NOTE(review): the slot ordering must match the 'd_scheduler' structure
 * declared by the core - confirm every function occupies its intended slot.
 */

d_scheduler		glue_scheduler_dispatch =
  {
    NULL,
    glue_scheduler_dump,
    glue_scheduler_start,
    glue_scheduler_stop,
    glue_scheduler_quantum,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL
  };
/*
* ---------- functions -------------------------------------------------------
*/
/*
 * this function dumps the scheduler manager's machine part, i.e the
 * identifier of the timer driving the context switches.
 */

t_error			glue_scheduler_dump(void)
{
  /* '%qd' is presumably the quad (64-bit) decimal conversion understood
     by the console module - confirm against the module's format support. */
  module_call(console, message,
	      '#',
	      " machine: timer(%qd)\n",
	      _scheduler.machine.timer);

  MACHINE_LEAVE();
}
/*
 * this function starts the scheduler.
 *
 * steps:
 *
 * 1) reserve a repeating timer so that glue_scheduler_switch() is
 *    triggered - hence a context switch induced - every quantum
 *    milliseconds.
 *
 * NOTE(review): the 'cpu' argument is ignored and a single timer
 * identifier is stored in the manager's machine part - presumably this
 * glue only supports a single CPU; confirm.
 */

t_error			glue_scheduler_start(i_cpu		cpu)
{
  /*
   * 1)
   */

  if (timer_reserve(TIMER_TYPE_FUNCTION,
		    TIMER_ROUTINE(glue_scheduler_switch),
		    TIMER_DATA(NULL),
		    _scheduler.quantum,
		    TIMER_OPTION_REPEAT,
		    &_scheduler.machine.timer) != ERROR_OK)
    MACHINE_ESCAPE("unable to reserve the timer");

  MACHINE_LEAVE();
}
/*
 * this function contributes to stopping the scheduler by manually triggering
 * the timer interrupt in order to induce an immediate scheduler election.
 *
 * during this election, the scheduler will notice the scheduler's state
 * has changed to stop and will do the necessary to stop electing threads.
 *
 * steps:
 *
 * 1) retrieve the current CPU's scheduler.
 * 2) if the CPU to stop is the current one, yield the execution in order
 *    to induce a scheduler election.
 * 3) release the scheduler timer.
 *
 * NOTE(review): the timer is released whether or not 'cpu' designates the
 * current CPU, and only one timer is stored in the manager's machine
 * part - presumably another single-CPU assumption; confirm.
 */

t_error			glue_scheduler_stop(i_cpu		cpu)
{
  o_scheduler*		scheduler;

  /*
   * 1)
   */

  if (scheduler_current(&scheduler) != ERROR_OK)
    MACHINE_ESCAPE("unable to retrieve the current scheduler");

  /*
   * 2)
   */

  if (scheduler->cpu == cpu)
    {
      if (scheduler_yield() != ERROR_OK)
	MACHINE_ESCAPE("unable to yield the execution");
    }

  /*
   * 3)
   */

  if (timer_release(_scheduler.machine.timer) != ERROR_OK)
    MACHINE_ESCAPE("unable to release the timer");

  MACHINE_LEAVE();
}
/*
* this handler is triggered whenever the scheduler quantum of time has
* elapsed. this function requests the scheduler to elect a new thread
* and performs the context switch.
*
* steps:
*
* 1) retrieve the current CPU's scheduler.
* 2) save the identifier of the currently running thread.
* 3) request a thread election to the scheduler.
* 4) save the identifier of the thread about to be executed.
* 5) perform a context switch between the two threads.
*/
void glue_scheduler_switch(void)
{
o_scheduler* scheduler;
i_thread current;
i_thread future;
/*
* 1)
*/
assert(scheduler_current(&scheduler) == ERROR_OK);
/*
* 2)
*/
current = scheduler->thread;
/*
* 3)
*/
assert(scheduler_elect() == ERROR_OK);
/*
* 4)
*/
future = scheduler->thread;
/*
* 5)
*/
assert(architecture_context_switch(current, future) == ERROR_OK);
}
/*
 * this function sets the scheduler quantum value.
 *
 * steps:
 *
 * 1) since the quantum has been updated, the delay of the timer driving
 *    the context switches must also be adjusted.
 */

t_error			glue_scheduler_quantum(t_quantum	quantum)
{
  /*
   * 1)
   */

  if (timer_update(_scheduler.machine.timer, quantum) != ERROR_OK)
    MACHINE_ESCAPE("unable to adjust the timer's delay to the "
		   "scheduler's quantum");

  MACHINE_LEAVE();
}

View file

@ -0,0 +1,60 @@
#
# ---------- header -----------------------------------------------------------
#
# project kaneton
#
# license kaneton
#
# file /home/mycure/kaneton/export/output/kaneton/test/Makefile
#
# created julien quintard [fri jun 29 11:19:40 2007]
# updated julien quintard [sat feb 5 12:10:10 2011]
#
#
# ---------- component --------------------------------------------------------
#
component := test
#
# ---------- dependencies -----------------------------------------------------
#
include ../environment/env.mk
#
# ---------- directives -------------------------------------------------------
#
.PHONY: main clear prototypes headers
#
# ---------- variables --------------------------------------------------------
#
SUBDIRS := client \
packages
#
# ---------- rules ------------------------------------------------------------
#
#
# the 'main' rule intentionally does nothing: the test component is only
# operated on through the rules below.
#
main:

#
# recurse into every subdirectory asking for a clean-up, then purge the
# current directory.
#
clear:
	for d in $(SUBDIRS) ; do \
	  $(call env_launch,$${d}/Makefile,clear,) ; \
	done

	$(call env_purge,)

#
# recurse into every subdirectory to generate the prototypes.
#
# note that the recipe previously recursed with the 'clear' target - an
# obvious copy/paste slip - which is fixed here.
#
prototypes:
	for d in $(SUBDIRS) ; do \
	  $(call env_launch,$${d}/Makefile,prototypes,) ; \
	done

#
# recurse into every subdirectory to generate the headers; this recipe
# suffered from the same copy/paste slip as 'prototypes'.
#
headers:
	for d in $(SUBDIRS) ; do \
	  $(call env_launch,$${d}/Makefile,headers,) ; \
	done

View file

@ -0,0 +1,50 @@
#
# ---------- header -----------------------------------------------------------
#
# project kaneton
#
# license kaneton
#
# file /home/mycure/kaneton/export/output/kaneton/tool/Makefile
#
# created julien quintard [fri nov 28 16:24:37 2008]
# updated julien quintard [sat feb 5 12:11:38 2011]
#
#
# ---------- dependencies -----------------------------------------------------
#
include ../environment/env.mk
#
# ---------- directives -------------------------------------------------------
#
.PHONY: main clear prototypes headers
#
# ---------- variables --------------------------------------------------------
#
SUBDIRS :=
#
# ---------- rules ------------------------------------------------------------
#
#
# the 'main' rule recurses into every subdirectory; note that SUBDIRS is
# currently empty, making this loop a no-op.
#
main:
	for d in $(SUBDIRS) ; do \
	  $(call env_launch,$${d}/Makefile,,) ; \
	done

#
# the 'clear' rule recurses into every subdirectory before purging the
# current directory.
#
clear:
	for d in $(SUBDIRS) ; do \
	  $(call env_launch,$${d}/Makefile,clear,) ; \
	done

	$(call env_purge,)

#
# nothing to generate for this component: both rules are intentionally
# empty.
#
prototypes:

headers: