x86_32 implementation for kernel

This commit is contained in:
nakst 2021-11-01 19:32:56 +00:00
parent 1ce7df083a
commit ed038d1059
22 changed files with 2066 additions and 1401 deletions

View File

@ -27,12 +27,11 @@ EsCRTsqrtf:
[global ProcessorReadTimeStamp] [global ProcessorReadTimeStamp]
ProcessorReadTimeStamp: ProcessorReadTimeStamp:
; TODO rdtsc
ret ret
[global ProcessorCheckStackAlignment] [global ProcessorCheckStackAlignment]
ProcessorCheckStackAlignment: ProcessorCheckStackAlignment:
; TODO
ret ret
[global ProcessorTLSRead] [global ProcessorTLSRead]

View File

@ -1,14 +1,36 @@
// TODO Support for systems without MMX/SSE.
// TODO Support for systems without an APIC.
#ifndef IMPLEMENTATION #ifndef IMPLEMENTATION
struct MMArchVAS { struct MMArchVAS {
// NOTE Must be first in the structure. See ProcessorSetAddressSpace and ArchSwitchContext.
uintptr_t cr3;
// Each process has a 32-bit address space.
// That's 2^20 pages.
// That's 2^10 L1 page tables. 2^7 bytes of bitset.
// Tracking of the committed L1 tables is done in l1Commit.
#define L1_COMMIT_SIZE_BYTES (1 << 7)
uint8_t l1Commit[L1_COMMIT_SIZE_BYTES];
size_t pageTablesCommitted;
size_t pageTablesActive;
// TODO Consider core/kernel mutex consistency? I think it's fine, but...
KMutex mutex; // Acquire to modify the page tables.
}; };
#define MM_KERNEL_SPACE_START (0xC0000000) #define MM_KERNEL_SPACE_START (0xC4000000)
#define MM_KERNEL_SPACE_SIZE (0xE0000000 - 0xC0000000) #define MM_KERNEL_SPACE_SIZE (0xE0000000 - 0xC4000000)
#define MM_MODULES_START (0xE0000000) #define MM_MODULES_START (0xE0000000)
#define MM_MODULES_SIZE (0xE8000000 - 0xE0000000) #define MM_MODULES_SIZE (0xE6000000 - 0xE0000000)
#define MM_CORE_REGIONS_START (0xE8000000) #define MM_CORE_REGIONS_START (0xE6000000)
#define MM_CORE_REGIONS_COUNT ((0xEC000000 - 0xE8000000) / sizeof(MMRegion)) #define MM_CORE_REGIONS_COUNT ((0xE7000000 - 0xE6000000) / sizeof(MMRegion))
#define MM_CORE_SPACE_START (0xE7000000)
#define MM_CORE_SPACE_SIZE (0xEC000000 - 0xE7000000)
#define LOW_MEMORY_MAP_START (0xEC000000)
#define LOW_MEMORY_LIMIT (0x00400000) // The first 4MB - 4KB is mapped here; the last 4KB map to the local APIC.
#define LOCAL_APIC_BASE (0xEC3FF000)
#define ArchCheckBundleHeader() (header.mapAddress >= 0xC0000000ULL || header.mapAddress < 0x1000 || fileSize > 0x10000000ULL) #define ArchCheckBundleHeader() (header.mapAddress >= 0xC0000000ULL || header.mapAddress < 0x1000 || fileSize > 0x10000000ULL)
#define ArchCheckELFHeader() (header->virtualAddress >= 0xC0000000ULL || header->virtualAddress < 0x1000 || header->segmentSize > 0x10000000ULL) #define ArchCheckELFHeader() (header->virtualAddress >= 0xC0000000ULL || header->virtualAddress < 0x1000 || header->segmentSize > 0x10000000ULL)
@ -20,266 +42,328 @@ struct MMArchVAS {
#ifdef IMPLEMENTATION #ifdef IMPLEMENTATION
#define KERNEL_PANIC_IPI (0) // NMIs ignore the interrupt vector. // Recursive page table mapping in slot 0x1FF.
size_t ProcessorSendIPI(uintptr_t interrupt, bool nmi = false, int processorID = -1); // Returns the number of processors the IPI was *not* sent to. #define PAGE_TABLE_L2 ((volatile uint32_t *) 0xFFFFF000)
void ProcessorReset(); #define PAGE_TABLE_L1 ((volatile uint32_t *) 0xFFC00000)
uintptr_t ArchFindRootSystemDescriptorPointer(); #define ENTRIES_PER_PAGE_TABLE (1024)
void ArchStartupApplicationProcessors(); #define ENTRIES_PER_PAGE_TABLE_BITS (10)
uint32_t bootloaderID;
struct InterruptContext {
uint32_t cr8, cr2, ds;
uint8_t fxsave[512 + 16];
uint32_t ebp, edi, esi, edx, ecx, ebx, eax;
uint32_t fromRing0, irq, errorCode;
uint32_t eip, cs, flags, esp, ss;
// Note that when ring 0 is interrupted the order is different:
// esp, ss, irq, errorCode, eip, cs, flags.
// However we fix it before and after interrupts in InterruptHandler.
};
volatile uintptr_t tlbShootdownVirtualAddress;
volatile size_t tlbShootdownPageCount;
volatile size_t tlbShootdownProcessorsRemaining;
#include <arch/x86_pc.h>
#include <drivers/acpi.cpp> #include <drivers/acpi.cpp>
#include <arch/x86_pc.cpp>
size_t ProcessorSendIPI(uintptr_t interrupt, bool nmi, int processorID) {
(void) interrupt;
(void) nmi;
(void) processorID;
// TODO.
return 0;
}
void ArchInitialise() {
// TODO.
}
void ArchNextTimer(size_t ms) {
// TODO.
}
uint64_t ArchGetTimeMs() {
// TODO.
return 0;
}
InterruptContext *ArchInitialiseThread(uintptr_t kernelStack, uintptr_t kernelStackSize, InterruptContext *ArchInitialiseThread(uintptr_t kernelStack, uintptr_t kernelStackSize,
Thread *thread, uintptr_t startAddress, uintptr_t argument1, Thread *thread, uintptr_t startAddress, uintptr_t argument1,
uintptr_t argument2, bool userland, uintptr_t stack, uintptr_t userStackSize) { uintptr_t argument2, bool userland, uintptr_t stack, uintptr_t userStackSize) {
// TODO. InterruptContext *context = ((InterruptContext *) (kernelStack + kernelStackSize - 12)) - 1;
return nullptr; thread->kernelStack = kernelStack + kernelStackSize - 12;
*((uintptr_t *) (kernelStack + kernelStackSize - 12)) = (uintptr_t) KThreadTerminate;
*((uintptr_t *) (kernelStack + kernelStackSize - 8)) = argument1;
*((uintptr_t *) (kernelStack + kernelStackSize - 4)) = argument2;
context->fxsave[32] = 0x80;
context->fxsave[33] = 0x1F;
if (userland) {
context->cs = 0x1B;
context->ds = 0x23;
context->ss = 0x23;
} else {
context->cs = 0x08;
context->ds = 0x10;
context->ss = 0x10;
context->fromRing0 = true;
}
context->flags = 1 << 9; // Interrupt flag
context->eip = startAddress;
context->esp = stack + userStackSize;
return context;
} }
void ArchSwitchContext(InterruptContext *context, MMArchVAS *virtualAddressSpace, uintptr_t threadKernelStack, void TLBShootdownCallback() {
Thread *newThread, MMSpace *oldAddressSpace) { uintptr_t page = tlbShootdownVirtualAddress;
// TODO.
}
EsError ArchApplyRelocation(uintptr_t type, uint8_t *buffer, uintptr_t offset, uintptr_t result) { // TODO How should this be determined?
// TODO. #define INVALIDATE_ALL_PAGES_THRESHOLD (1024)
return ES_ERROR_UNSUPPORTED_FEATURE; if (tlbShootdownPageCount > INVALIDATE_ALL_PAGES_THRESHOLD) {
} ProcessorInvalidateAllPages();
} else {
bool MMArchMapPage(MMSpace *space, uintptr_t physicalAddress, uintptr_t virtualAddress, unsigned flags) { for (uintptr_t i = 0; i < tlbShootdownPageCount; i++, page += K_PAGE_SIZE) {
// TODO. ProcessorInvalidatePage(page);
return false; }
} }
void MMArchUnmapPages(MMSpace *space, uintptr_t virtualAddressStart, uintptr_t pageCount, unsigned flags, size_t unmapMaximum, uintptr_t *resumePosition) {
// TODO.
}
bool MMArchMakePageWritable(MMSpace *space, uintptr_t virtualAddress) {
// TODO.
return false;
}
bool MMArchHandlePageFault(uintptr_t address, uint32_t flags) {
// TODO.
return false;
} }
void MMArchInvalidatePages(uintptr_t virtualAddressStart, uintptr_t pageCount) { void MMArchInvalidatePages(uintptr_t virtualAddressStart, uintptr_t pageCount) {
// TODO. KSpinlockAcquire(&ipiLock);
tlbShootdownVirtualAddress = virtualAddressStart;
tlbShootdownPageCount = pageCount;
tlbShootdownProcessorsRemaining = KGetCPUCount();
if (tlbShootdownProcessorsRemaining > 1) {
size_t ignored = ProcessorSendIPI(TLB_SHOOTDOWN_IPI);
__sync_fetch_and_sub(&tlbShootdownProcessorsRemaining, ignored);
while (tlbShootdownProcessorsRemaining);
}
TLBShootdownCallback();
KSpinlockRelease(&ipiLock);
} }
bool MMArchIsBufferInUserRange(uintptr_t baseAddress, size_t byteCount) { bool MMArchIsBufferInUserRange(uintptr_t baseAddress, size_t byteCount) {
// TODO. // TODO.
KernelPanic("Unimplemented!\n");
return false; return false;
} }
bool MMArchSafeCopy(uintptr_t destinationAddress, uintptr_t sourceAddress, size_t byteCount) { bool MMArchSafeCopy(uintptr_t destinationAddress, uintptr_t sourceAddress, size_t byteCount) {
// TODO. // TODO.
KernelPanic("Unimplemented!\n");
return false; return false;
} }
bool MMArchCommitPageTables(MMSpace *space, MMRegion *region) { bool MMArchCommitPageTables(MMSpace *space, MMRegion *region) {
// TODO. KMutexAssertLocked(&space->reserveMutex);
return false;
MMArchVAS *data = &space->data;
uintptr_t base = region->baseAddress - (space == coreMMSpace ? MM_CORE_SPACE_START : 0);
uintptr_t end = base + (region->pageCount << K_PAGE_BITS);
uintptr_t needed = 0;
for (uintptr_t i = base; i < end; i += 1L << (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 1)) {
uintptr_t indexL2 = i >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 1);
if (!(data->l1Commit[indexL2 >> 3] & (1 << (indexL2 & 7)))) needed++;
i = indexL2 << (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 1);
}
if (needed) {
if (!MMCommit(needed * K_PAGE_SIZE, true)) {
return false;
}
data->pageTablesCommitted += needed;
}
for (uintptr_t i = base; i < end; i += 1L << (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 1)) {
uintptr_t indexL2 = i >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 1);
data->l1Commit[indexL2 >> 3] |= (1 << (indexL2 & 7));
i = indexL2 << (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 1);
}
return true;
} }
bool MMArchInitialiseUserSpace(MMSpace *space, MMRegion *firstRegion) { bool MMArchInitialiseUserSpace(MMSpace *space, MMRegion *firstRegion) {
// TODO. // TODO.
KernelPanic("Unimplemented!\n");
return false; return false;
} }
void MMArchInitialise() {
// TODO.
}
void MMArchFreeVAS(MMSpace *space) { void MMArchFreeVAS(MMSpace *space) {
// TODO. // TODO.
KernelPanic("Unimplemented!\n");
} }
void MMArchFinalizeVAS(MMSpace *space) { void MMArchFinalizeVAS(MMSpace *space) {
// TODO. // TODO.
} KernelPanic("Unimplemented!\n");
uintptr_t MMArchEarlyAllocatePage() {
// TODO.
return 0;
}
uint64_t MMArchPopulatePageFrameDatabase() {
// TODO.
return 0;
}
uintptr_t MMArchGetPhysicalMemoryHighest() {
// TODO.
return 0;
}
void ProcessorDisableInterrupts() {
// TODO.
}
void ProcessorEnableInterrupts() {
// TODO.
}
bool ProcessorAreInterruptsEnabled() {
// TODO.
return false;
}
void ProcessorHalt() {
// TODO.
}
void ProcessorSendYieldIPI(Thread *thread) {
// TODO.
}
void ProcessorFakeTimerInterrupt() {
// TODO.
}
void ProcessorInvalidatePage(uintptr_t virtualAddress) {
// TODO.
}
void ProcessorInvalidateAllPages() {
// TODO.
}
void ProcessorFlushCodeCache() {
// TODO.
}
void ProcessorFlushCache() {
// TODO.
}
void ProcessorSetLocalStorage(CPULocalStorage *cls) {
// TODO.
}
void ProcessorSetThreadStorage(uintptr_t tls) {
// TODO.
}
void ProcessorSetAddressSpace(MMArchVAS *virtualAddressSpace) {
// TODO.
}
uint64_t ProcessorReadTimeStamp() {
// TODO.
return 0;
}
CPULocalStorage *GetLocalStorage() {
// TODO.
return nullptr;
}
Thread *GetCurrentThread() {
// TODO.
return nullptr;
}
uintptr_t MMArchTranslateAddress(MMSpace *space, uintptr_t virtualAddress, bool writeAccess) {
// TODO.
return 0;
}
uint32_t KPCIReadConfig(uint8_t bus, uint8_t device, uint8_t function, uint8_t offset, int size) {
// TODO.
return 0;
}
void KPCIWriteConfig(uint8_t bus, uint8_t device, uint8_t function, uint8_t offset, uint32_t value, int size) {
// TODO.
}
bool KRegisterIRQ(intptr_t interruptIndex, KIRQHandler handler, void *context, const char *cOwnerName, KPCIDevice *pciDevice) {
// TODO.
return false;
}
KMSIInformation KRegisterMSI(KIRQHandler handler, void *context, const char *cOwnerName) {
// TODO.
return {};
}
void KUnregisterMSI(uintptr_t tag) {
// TODO.
}
void ProcessorOut8(uint16_t port, uint8_t value) {
// TODO.
}
uint8_t ProcessorIn8(uint16_t port) {
// TODO.
return 0;
}
void ProcessorOut16(uint16_t port, uint16_t value) {
// TODO.
}
uint16_t ProcessorIn16(uint16_t port) {
// TODO.
return 0;
}
void ProcessorOut32(uint16_t port, uint32_t value) {
// TODO.
}
uint32_t ProcessorIn32(uint16_t port) {
// TODO.
return 0;
}
void ProcessorReset() {
// TODO.
}
uintptr_t ArchFindRootSystemDescriptorPointer() {
// TODO.
return 0;
} }
void ArchStartupApplicationProcessors() { void ArchStartupApplicationProcessors() {
// TODO. // TODO.
} }
uintptr_t GetBootloaderInformationOffset() { bool MMArchHandlePageFault(uintptr_t address, uint32_t flags) {
// TODO. address &= ~(K_PAGE_SIZE - 1);
return 0; bool forSupervisor = flags & MM_HANDLE_PAGE_FAULT_FOR_SUPERVISOR;
if (!ProcessorAreInterruptsEnabled()) {
KernelPanic("MMArchHandlePageFault - Page fault with interrupts disabled.\n");
}
if (address >= MM_CORE_REGIONS_START && address < MM_CORE_REGIONS_START + MM_CORE_REGIONS_COUNT * sizeof(MMRegion) && forSupervisor) {
// This is where coreMMSpace stores its regions.
// Allocate physical memory and map it.
MMArchMapPage(kernelMMSpace, MMPhysicalAllocate(MM_PHYSICAL_ALLOCATE_ZEROED), address, MM_MAP_PAGE_COMMIT_TABLES_NOW);
return true;
} else if (address >= MM_CORE_SPACE_START && address < MM_CORE_SPACE_START + MM_CORE_SPACE_SIZE && forSupervisor) {
return MMHandlePageFault(coreMMSpace, address, flags);
} else if (address >= MM_KERNEL_SPACE_START && address < MM_KERNEL_SPACE_START + MM_KERNEL_SPACE_SIZE && forSupervisor) {
return MMHandlePageFault(kernelMMSpace, address, flags);
} else if (address >= MM_MODULES_START && address < MM_MODULES_START + MM_MODULES_SIZE && forSupervisor) {
return MMHandlePageFault(kernelMMSpace, address, flags);
} else if (address >= K_PAGE_SIZE) {
Thread *thread = GetCurrentThread();
return MMHandlePageFault(thread->temporaryAddressSpace ?: thread->process->vmm, address, flags);
}
return false;
} }
extern "C" void ProcessorDebugOutputByte(uint8_t byte) { void ContextSanityCheck(InterruptContext *context) {
// TODO. if (context->cs > 0x100 || context->ds > 0x100 || context->ss > 0x100
|| (context->eip < 0xC0000000 && context->cs == 0x08)) {
KernelPanic("ContextSanityCheck - Corrupt context (%x/%x/%x/%x/%x/%x)\n",
context, context->cs, context->ds, context->ss, context->eip, context->esp);
}
}
extern "C" void InterruptHandler(InterruptContext *context) {
if (context->fromRing0) {
uint32_t esp = context->irq;
uint32_t ss = context->errorCode;
context->irq = context->eip;
context->errorCode = context->cs;
context->eip = context->flags;
context->cs = context->esp;
context->flags = context->ss;
context->esp = esp;
context->ss = ss;
}
CPULocalStorage *local = GetLocalStorage();
if (scheduler.panic && context->irq != 2) {
goto end;
}
if (context->irq < 0x20) {
// Processor exception.
ProcessorEnableInterrupts();
if ((context->cs & 3) == 0) {
bool handled = false;
if (context->irq == 0x0E && (~context->errorCode & (1 << 3))) {
if (MMArchHandlePageFault(context->cr2, MM_HANDLE_PAGE_FAULT_FOR_SUPERVISOR
| ((context->errorCode & 2) ? MM_HANDLE_PAGE_FAULT_WRITE : 0))) {
handled = true;
}
}
if (!handled) {
KernelPanic("Unresolvable processor exception encountered in supervisor mode.\n%z\nEIP = %x (CPU %d)\nX86 error codes: [err] %x, [cr2] %x\n"
"Stack: [esp] %x, [ebp] %x\nRegisters: [eax] %x, [ebx] %x, [esi] %x, [edi] %x.\nThread ID = %d\n",
exceptionInformation[context->irq], context->eip, local ? local->processorID : -1, context->errorCode, context->cr2,
context->esp, context->ebp, context->eax, context->ebx, context->esi, context->edi,
local && local->currentThread ? local->currentThread->id : -1);
}
} else {
// TODO User exceptions.
}
} else if (context->irq == 0xFF) {
// Spurious interrupt (APIC), ignore.
} else if (context->irq >= 0x20 && context->irq < 0x30) {
// Spurious interrupt (PIC), ignore.
} else if (context->irq == TLB_SHOOTDOWN_IPI) {
TLBShootdownCallback();
if (!tlbShootdownProcessorsRemaining) KernelPanic("InterruptHandler - tlbShootdownProcessorsRemaining is 0.\n");
__sync_fetch_and_sub(&tlbShootdownProcessorsRemaining, 1);
} else if (context->irq == TIMER_INTERRUPT) {
if (local && scheduler.started && local->schedulerReady) {
scheduler.Yield(context);
}
} else if (context->irq >= INTERRUPT_VECTOR_MSI_START && context->irq < INTERRUPT_VECTOR_MSI_START + INTERRUPT_VECTOR_MSI_COUNT && local) {
KSpinlockAcquire(&irqHandlersLock);
MSIHandler handler = msiHandlers[context->irq - INTERRUPT_VECTOR_MSI_START];
KSpinlockRelease(&irqHandlersLock);
local->irqSwitchThread = false;
if (!handler.callback) {
KernelLog(LOG_ERROR, "Arch", "unexpected MSI", "Unexpected MSI vector %X (no handler).\n", context->irq);
} else {
handler.callback(context->irq - INTERRUPT_VECTOR_MSI_START, handler.context);
}
if (local->irqSwitchThread && scheduler.started && local->schedulerReady) {
scheduler.Yield(context); // LapicEndOfInterrupt is called in PostContextSwitch.
}
LapicEndOfInterrupt();
} else if (context->irq >= IRQ_BASE && context->irq < IRQ_BASE + 0x20 && local) {
// See InterruptHandler in arch/x86_64/kernel.cpp for a discussion of what this is doing.
local->irqSwitchThread = false;
local->inIRQ = true;
uintptr_t line = context->irq - IRQ_BASE;
KSpinlockAcquire(&irqHandlersLock);
for (uintptr_t i = 0; i < sizeof(irqHandlers) / sizeof(irqHandlers[0]); i++) {
IRQHandler handler = irqHandlers[i];
if (!handler.callback) continue;
if (handler.line == -1) {
if (line != 9 && line != 10 && line != 11) {
continue;
} else {
uint8_t mappedLine = pciIRQLines[handler.pciDevice->slot][handler.pciDevice->interruptPin - 1];
if (mappedLine && line != mappedLine) {
continue;
}
}
} else {
if ((uintptr_t) handler.line != line) {
continue;
}
}
KSpinlockRelease(&irqHandlersLock);
handler.callback(context->irq - IRQ_BASE, handler.context);
KSpinlockAcquire(&irqHandlersLock);
}
KSpinlockRelease(&irqHandlersLock);
local->inIRQ = false;
if (local->irqSwitchThread && scheduler.started && local->schedulerReady) {
scheduler.Yield(context);
}
LapicEndOfInterrupt();
}
if (context->irq >= 0x30 && context->irq != 0xFF) {
LapicEndOfInterrupt();
}
end:;
ContextSanityCheck(context);
if (context->fromRing0) {
uint32_t irq = context->esp;
uint32_t errorCode = context->ss;
context->ss = context->flags;
context->esp = context->cs;
context->flags = context->eip;
context->cs = context->errorCode;
context->eip = context->irq;
context->irq = irq;
context->errorCode = errorCode;
}
} }
#include <kernel/terminal.cpp> #include <kernel/terminal.cpp>

View File

@ -1,8 +1,553 @@
[bits 32] [bits 32]
[global ProcessorReset]
[global ProcessorDebugOutputByte]
[global ProcessorIn16]
[global ProcessorIn32]
[global ProcessorIn8]
[global ProcessorOut16]
[global ProcessorOut32]
[global ProcessorOut8]
[global ProcessorHalt]
[global ProcessorDisableInterrupts]
[global ProcessorEnableInterrupts]
[global ProcessorAreInterruptsEnabled]
[global ProcessorSetLocalStorage]
[global ProcessorReadCR3]
[global ProcessorInvalidatePage]
[global ProcessorInvalidateAllPages]
[global ProcessorReadTimeStamp]
[global ProcessorSetThreadStorage]
[global ProcessorFakeTimerInterrupt]
[global ProcessorSetAddressSpace]
[global ProcessorFlushCodeCache]
[global GetLocalStorage]
[global GetCurrentThread]
[global ArchSwitchContext]
[global processorGDTR]
[global timeStampCounterSynchronizationValue]
[extern ArchNextTimer]
[extern InterruptHandler]
[extern KThreadTerminate]
[extern KernelMain]
[extern PostContextSwitch]
[extern SetupProcessor2]
[extern Syscall]
[extern installationID]
[extern PCSetupCOM1]
[extern PCDisablePIC]
[extern PCProcessMemoryMap]
[extern bootloaderID]
[extern bootloaderInformationOffset]
[section .bss]
%define boot_stack_size 10000
boot_stack: resb boot_stack_size
%define idt_size 2048
idt_data: resb idt_size
%define cpu_local_storage_size (256 * 4 * 4)
cpu_local_storage: resb cpu_local_storage_size
cpu_local_index: resb 4
timeStampCounterSynchronizationValue: resb 8
[section .data]
processorGDTR:
times 8 db 0
idt:
.limit: dw idt_size - 1
.base: dd idt_data
align 4
gdt_data:
.null_entry: dq 0
.code_entry: dd 0xFFFF ; 0x0008
db 0
dw 0xCF9A
db 0
.data_entry: dd 0xFFFF ; 0x0010
db 0
dw 0xCF92
db 0
.user_code: dd 0xFFFF ; 0x001B
db 0
dw 0xCFFA
db 0
.user_data: dd 0xFFFF ; 0x0023
db 0
dw 0xCFF2
db 0
.tss: dd 0x68 ; 0x0028
db 0
dw 0xE9
db 0
dq 0
.local: times 256 dq 0 ; 0x0038 - 0x0838
.gdt: dw (gdt_data.gdt - gdt_data - 1)
.gdt2: dd gdt_data
[section .text] [section .text]
[global _start] [global _start]
_start: _start:
; TODO ; Save the bootloader ID and information offset.
jmp $ mov [bootloaderID],esi
mov [bootloaderInformationOffset],edi
; The MBR bootloader does not know the address of the RSDP.
cmp edi,0
jne .standard_acpi
mov [0x7FE8],edi
.standard_acpi:
; Install the boot stack.
mov esp,boot_stack + boot_stack_size
; Load the installation ID.
mov eax,[edi + 0x7FF0]
mov [installationID + 0],eax
mov eax,[edi + 0x7FF4]
mov [installationID + 4],eax
mov eax,[edi + 0x7FF8]
mov [installationID + 8],eax
mov eax,[edi + 0x7FFC]
mov [installationID + 12],eax
; Load the new GDT, saving the location of the bootstrap GDT.
lgdt [gdt_data.gdt]
sgdt [processorGDTR]
; Move the identity paging the bootloader used to LOW_MEMORY_MAP_START.
; Then, map the local APIC to LOCAL_APIC_BASE.
mov eax,[0xFFFFF000]
mov [0xFFFFFEC0],eax
xor eax,eax
mov [0xFFFFF000],eax
mov eax,0xFEE00103
mov [0xFFC00000 + (0xEC3FF << 2)],eax
mov eax,cr3
mov cr3,eax
; Install the interrupt handlers
mov ebx,idt_data
%macro INSTALL_INTERRUPT_HANDLER 1
mov edx,InterruptHandler%1
call InstallInterruptHandler
add ebx,8
%endmacro
%assign i 0
%rep 256
INSTALL_INTERRUPT_HANDLER i
%assign i i+1
%endrep
; Setup the remaining things and call KernelMain.
call SetupProcessor1 ; Need to get SSE up before calling into C code.
call PCSetupCOM1
call PCDisablePIC
call PCProcessMemoryMap
call KernelMain
; Fall-through.
ProcessorReady:
; Set the timer and become this CPU's idle thread.
push 1
call ArchNextTimer
; Fall-through.
ProcessorIdle:
sti
hlt
jmp ProcessorIdle
SetupProcessor1:
; x87 FPU.
fninit
fldcw [.cw]
jmp .cwa
.cw: dw 0x037A
.cwa:
; Enable MMX, SSE and SSE2.
; TODO Check these are actually present!
mov eax,cr0
mov ebx,cr4
and eax,~4
or eax,2
or ebx,512 + 1024
mov cr0,eax
mov cr4,ebx
; Setup the local storage.
; This creates a new data segment in the GDT pointing to a unique 16-byte block of cpu_local_storage,
; and updates FS to use the data segment.
mov eax,[cpu_local_index]
mov ebx,eax
shl ebx,4
add ebx,cpu_local_storage
mov edx,ebx
shl ebx,16
or ebx,0x0000FFFF
mov ecx,edx
shr ecx,16
and edx,0xFF000000
or edx,0x00CF9200
or dl,cl
mov dword [gdt_data.local + eax * 8 + 0],ebx
mov dword [gdt_data.local + eax * 8 + 4],edx
lea eax,[0x0038 + eax * 8]
mov fs,ax
inc dword [cpu_local_index]
; Enable global pages.
mov eax,cr4
or eax,1 << 7
mov cr4,eax
; Enable write protect, so copy-on-write works in the kernel, and MMArchSafeCopy will page fault in read-only regions.
mov eax,cr0
or eax,1 << 16
mov cr0,eax
; Load the IDTR.
lidt [idt]
sti
; Enable the APIC.
; TODO Check it is actually present!
mov ecx,0x1B
rdmsr
or eax,0x800
wrmsr
; Set the spurious interrupt vector to 0xFF
mov eax,0xEC3FF0F0
mov ebx,[eax]
or ebx,0x1FF
mov [eax],ebx
; Use the flat processor addressing model
mov eax,0xEC3FF0E0
mov dword [eax],0xFFFFFFFF
; Make sure that no external interrupts are masked
xor eax,eax
mov [0xEC3FF080],eax
; TODO More feature detection and initialisation!
ret
InstallInterruptHandler:
mov word [ebx + 0],dx
mov word [ebx + 2],0x0008
mov word [ebx + 4],0x8E00
shr edx,16
mov word [ebx + 6],dx
ret
%macro INTERRUPT_HANDLER 1
InterruptHandler%1:
push dword 0 ; A fake error code
push dword %1 ; The interrupt number
jmp ASMInterruptHandler
%endmacro
%macro INTERRUPT_HANDLER_EC 1
InterruptHandler%1:
; The CPU already pushed an error code
push dword %1 ; The interrupt number
jmp ASMInterruptHandler
%endmacro
INTERRUPT_HANDLER 0
INTERRUPT_HANDLER 1
INTERRUPT_HANDLER 2
INTERRUPT_HANDLER 3
INTERRUPT_HANDLER 4
INTERRUPT_HANDLER 5
INTERRUPT_HANDLER 6
INTERRUPT_HANDLER 7
INTERRUPT_HANDLER_EC 8
INTERRUPT_HANDLER 9
INTERRUPT_HANDLER_EC 10
INTERRUPT_HANDLER_EC 11
INTERRUPT_HANDLER_EC 12
INTERRUPT_HANDLER_EC 13
INTERRUPT_HANDLER_EC 14
INTERRUPT_HANDLER 15
INTERRUPT_HANDLER 16
INTERRUPT_HANDLER_EC 17
INTERRUPT_HANDLER 18
INTERRUPT_HANDLER 19
INTERRUPT_HANDLER 20
INTERRUPT_HANDLER 21
INTERRUPT_HANDLER 22
INTERRUPT_HANDLER 23
INTERRUPT_HANDLER 24
INTERRUPT_HANDLER 25
INTERRUPT_HANDLER 26
INTERRUPT_HANDLER 27
INTERRUPT_HANDLER 28
INTERRUPT_HANDLER 29
INTERRUPT_HANDLER 30
INTERRUPT_HANDLER 31
%assign i 32
%rep 224
INTERRUPT_HANDLER i
%assign i i+1
%endrep
ASMInterruptHandler:
cld
test byte [esp + 12],3
jnz .have_esp
; When ring 0 is interrupted, ESP and SS aren't pushed.
; We push them ourselves here; we'll fix the order later.
push eax
push esp
mov eax,ss
xchg [esp + 4],eax
push 1
jmp .fixed
.have_esp:
push 0
.fixed:
push eax
push ebx
push ecx
push edx
push esi
push edi
push ebp
mov ebx,esp
and esp,~0xF
fxsave [esp - 512]
mov esp,ebx
sub esp,512 + 16
mov eax,ds
push eax
mov eax,0x10
mov ds,ax
mov eax,cr2
push eax
mov edx,[0xEC3FF080]
push edx
mov edx,0xF0
mov [0xEC3FF080],edx ; Mask all interrupts.
sti ; ...so there's no need to have the interrupt flag clear.
push esp
call InterruptHandler
add esp,4
xor eax,eax
.return:
cli ; Must be done before restoring CR8.
pop edx
mov [0xEC3FF080],edx
add esp,4
pop ebx
mov ds,bx
add esp,512 + 16
mov ebx,esp
and ebx,~0xF
fxrstor [ebx - 512]
or al,al
jz .old_thread
fninit ; New thread - initialise FPU.
.old_thread:
pop ebp
pop edi
pop esi
pop edx
pop ecx
pop ebx
pop eax
test byte [esp],1
jz .need_esp
; When returning to ring 0, ESP and SS aren't popped.
; So we do it manually here.
add esp,8
.need_esp:
add esp,12
iret
ArchSwitchContext:
cli
mov eax,[esp + 16]
mov [fs:8],eax
mov ebx,[esp + 12]
mov [fs:4],ebx
mov edi,[esp + 8]
mov ecx,[edi]
mov edx,cr3
cmp edx,ecx
je .cont
mov cr3,ecx
.cont:
mov eax,[esp + 4]
mov ecx,[esp + 20]
mov esp,eax ; Put the stack just below the context.
push ecx
push eax
call PostContextSwitch
add esp,8
jmp ASMInterruptHandler.return
ProcessorDebugOutputByte:
%ifdef COM_OUTPUT
mov dx,0x3F8 + 5
.WaitRead:
in al,dx
and al,0x20
cmp al,0
je .WaitRead
mov dx,0x3F8 + 0
mov eax,[esp + 4]
out dx,al
%endif
ret
ProcessorOut8:
mov edx,[esp + 4]
mov eax,[esp + 8]
out dx,al
ret
ProcessorIn8:
mov edx,[esp + 4]
xor eax,eax
in al,dx
ret
ProcessorOut16:
mov edx,[esp + 4]
mov eax,[esp + 8]
out dx,ax
ret
ProcessorIn16:
mov edx,[esp + 4]
xor eax,eax
in ax,dx
ret
ProcessorOut32:
mov edx,[esp + 4]
mov eax,[esp + 8]
out dx,eax
ret
ProcessorIn32:
mov edx,[esp + 4]
xor eax,eax
in eax,dx
ret
ProcessorReset:
in al,0x64
test al,2
jne ProcessorReset
mov al,0xFE
out 0x64,al
; Fall-through.
ProcessorHalt:
cli
hlt
jmp ProcessorHalt
ProcessorDisableInterrupts:
mov eax,0xE0 ; Still allow important IPIs to go through.
mov [0xEC3FF080],eax
ret
ProcessorEnableInterrupts:
xor eax,eax
mov [0xEC3FF080],eax
ret
ProcessorAreInterruptsEnabled:
xor al,al
mov edx,[0xEC3FF080]
or edx,edx
jnz .done
mov al,1
.done:
ret
GetLocalStorage:
mov eax,[fs:0]
ret
GetCurrentThread:
mov eax,[fs:8]
ret
ProcessorSetLocalStorage:
mov eax,[esp + 4]
mov [fs:0],eax
ret
ProcessorReadCR3:
mov eax,cr3
ret
ProcessorInvalidatePage:
mov eax,[esp + 4]
invlpg [eax]
ret
ProcessorInvalidateAllPages:
; Toggle CR4.PGE to invalidate all TLB entries, including global entries.
mov eax,cr4
and eax,~(1 << 7)
mov cr4,eax
or eax,1 << 7
mov cr4,eax
ret
ProcessorReadTimeStamp:
rdtsc
ret
ProcessorSetThreadStorage:
; TODO.
ret
ProcessorFakeTimerInterrupt:
int 0x40
ret
ProcessorSetAddressSpace:
mov eax,[esp + 4]
mov edx,[eax]
mov eax,cr3
cmp eax,edx
je .cont
mov cr3,edx
.cont:
ret
ProcessorFlushCodeCache:
wbinvd
ret

View File

@ -1,83 +0,0 @@
#ifndef ARCH_X86_64_HEADER
#define ARCH_X86_64_HEADER
#ifndef ES_ARCH_X86_64
#error Included x86_64.h but not targeting x86_64.
#endif
#include "x86_pc.h"
// --------------------------------- Interrupt vectors.
// Interrupt vectors:
// 0x00 - 0x1F: CPU exceptions
// 0x20 - 0x2F: PIC (disabled, spurious)
// 0x30 - 0x4F: Timers and low-priority IPIs.
// 0x50 - 0x6F: APIC (standard)
// 0x70 - 0xAF: MSI
// 0xF0 - 0xFE: High-priority IPIs
// 0xFF: APIC (spurious interrupt)
#define TIMER_INTERRUPT (0x40)
#define YIELD_IPI (0x41)
#define IRQ_BASE (0x50)
#define CALL_FUNCTION_ON_ALL_PROCESSORS_IPI (0xF0)
#define KERNEL_PANIC_IPI (0) // NMIs ignore the interrupt vector.
#define INTERRUPT_VECTOR_MSI_START (0x70)
#define INTERRUPT_VECTOR_MSI_COUNT (0x40)
// --------------------------------- Forward declarations.
extern "C" void gdt_data();
extern "C" void processorGDTR();
extern "C" uint64_t ProcessorReadCR3();
extern "C" uintptr_t ProcessorGetRSP();
extern "C" uintptr_t ProcessorGetRBP();
extern "C" uint64_t ProcessorReadMXCSR();
extern "C" void ProcessorInstallTSS(uint32_t *gdt, uint32_t *tss);
extern "C" void ProcessorAPStartup();
extern "C" void ProcessorReset();
extern "C" void SSSE3Framebuffer32To24Copy(volatile uint8_t *destination, volatile uint8_t *source, size_t pixelGroups);
extern "C" uintptr_t _KThreadTerminate;
extern bool pagingNXESupport;
extern bool pagingPCIDSupport;
extern bool pagingSMEPSupport;
extern bool pagingTCESupport;
extern volatile uint64_t timeStampCounterSynchronizationValue;
extern "C" bool simdSSE3Support;
extern "C" bool simdSSSE3Support;
extern uintptr_t bootloaderInformationOffset;
struct NewProcessorStorage {
struct CPULocalStorage *local;
uint32_t *gdt;
};
NewProcessorStorage AllocateNewProcessorStorage(struct ArchCPU *archCPU);
extern "C" void SetupProcessor2(struct NewProcessorStorage *);
void ArchDelay1Ms(); // Spin for approximately 1ms. Use only during initialisation. Not thread-safe.
uint64_t ArchGetTimeFromPITMs();
void *ACPIGetRSDP();
size_t ProcessorSendIPI(uintptr_t interrupt, bool nmi = false, int processorID = -1); // Returns the number of processors the IPI was *not* sent to.
void ArchSetPCIIRQLine(uint8_t slot, uint8_t pin, uint8_t line);
uintptr_t ArchFindRootSystemDescriptorPointer();
void ArchStartupApplicationProcessors();
struct InterruptContext {
uint64_t cr2, ds;
uint8_t fxsave[512 + 16];
uint64_t _check, cr8;
uint64_t r15, r14, r13, r12, r11, r10, r9, r8;
uint64_t rbp, rdi, rsi, rdx, rcx, rbx, rax;
uint64_t interruptNumber, errorCode;
uint64_t rip, cs, flags, rsp, ss;
};
#endif

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,61 @@
[bits 64] [bits 64]
[global ArchSwitchContext]
[global GetCurrentThread]
[global GetLocalStorage]
[global MMArchSafeCopy]
[global ProcessorAPStartup]
[global ProcessorAreInterruptsEnabled]
[global ProcessorDebugOutputByte]
[global ProcessorDisableInterrupts]
[global ProcessorEnableInterrupts]
[global ProcessorFakeTimerInterrupt]
[global ProcessorFlushCodeCache]
[global ProcessorGetRBP]
[global ProcessorGetRSP]
[global ProcessorHalt]
[global ProcessorIn16]
[global ProcessorIn32]
[global ProcessorIn8]
[global ProcessorInstallTSS]
[global ProcessorInvalidateAllPages]
[global ProcessorInvalidatePage]
[global ProcessorOut16]
[global ProcessorOut32]
[global ProcessorOut8]
[global ProcessorReadCR3]
[global ProcessorReadMXCSR]
[global ProcessorReadTimeStamp]
[global ProcessorReset]
[global ProcessorSetAddressSpace]
[global ProcessorSetLocalStorage]
[global ProcessorSetThreadStorage]
[global _KThreadTerminate]
[global _start]
[global gdt_data]
[global pagingNXESupport]
[global pagingPCIDSupport]
[global pagingSMEPSupport]
[global pagingTCESupport]
[global processorGDTR]
[global simdSSE3Support]
[global simdSSSE3Support]
[global timeStampCounterSynchronizationValue]
[extern ArchNextTimer]
[extern InterruptHandler]
[extern KThreadTerminate]
[extern KernelMain]
[extern PostContextSwitch]
[extern SetupProcessor2]
[extern Syscall]
[extern installationID]
[extern PCSetupCOM1]
[extern PCDisablePIC]
[extern PCProcessMemoryMap]
[extern bootloaderID]
[extern bootloaderInformationOffset]
[section .bss] [section .bss]
align 16 align 16
@ -23,62 +79,27 @@ idt:
cpu_local_storage_index: cpu_local_storage_index:
dq 0 dq 0
[global physicalMemoryRegions]
physicalMemoryRegions:
dq 0xFFFFFE0000060000
[global physicalMemoryRegionsCount]
physicalMemoryRegionsCount:
dq 0
[global physicalMemoryRegionsPagesCount]
physicalMemoryRegionsPagesCount:
dq 0
[global physicalMemoryOriginalPagesCount]
physicalMemoryOriginalPagesCount:
dq 0
[global physicalMemoryRegionsIndex]
physicalMemoryRegionsIndex:
dq 0
[global physicalMemoryHighest]
physicalMemoryHighest:
dq 0
[global pagingNXESupport]
pagingNXESupport: pagingNXESupport:
dd 1 dd 1
[global pagingPCIDSupport]
pagingPCIDSupport: pagingPCIDSupport:
dd 1 dd 1
[global pagingSMEPSupport]
pagingSMEPSupport: pagingSMEPSupport:
dd 1 dd 1
[global pagingTCESupport]
pagingTCESupport: pagingTCESupport:
dd 1 dd 1
[global simdSSE3Support]
simdSSE3Support: simdSSE3Support:
dd 1 dd 1
[global simdSSSE3Support]
simdSSSE3Support: simdSSSE3Support:
dd 1 dd 1
[global bootloaderID]
bootloaderID:
dd 0
[global bootloaderInformationOffset]
bootloaderInformationOffset:
dq 0
align 16 align 16
[global processorGDTR]
processorGDTR: processorGDTR:
dq 0 dq 0
dq 0 dq 0
[section .text] [section .text]
[global _start]
_start: _start:
cli
mov rax,0x63 mov rax,0x63
mov fs,ax mov fs,ax
mov gs,ax mov gs,ax
@ -87,20 +108,20 @@ _start:
mov rax,bootloaderID mov rax,bootloaderID
mov [rax],rsi mov [rax],rsi
; The MBR bootloader does not know the address of the RSDP.
cmp rdi,0 cmp rdi,0
jne .standard_acpi jne .standard_acpi
mov [0x7FE8],rdi mov [0x7FE8],rdi
.standard_acpi: .standard_acpi:
; Install a stack
mov rsp,stack + stack_size
; Save the bootloader information offset. ; Save the bootloader information offset.
mov rax,bootloaderInformationOffset mov rax,bootloaderInformationOffset
mov [rax],rdi mov [rax],rdi
; Install a stack
mov rsp,stack + stack_size
; Load the installation ID. ; Load the installation ID.
[extern installationID]
mov rbx,installationID mov rbx,installationID
mov rax,[rdi + 0x7FF0] mov rax,[rdi + 0x7FF0]
mov [rbx],rax mov [rbx],rax
@ -113,57 +134,9 @@ _start:
mov rax,cr3 mov rax,cr3
mov cr3,rax mov cr3,rax
SetupCOM1: call PCSetupCOM1
; Setup the serial COM1 port for debug output. call PCDisablePIC
%ifdef COM_OUTPUT call PCProcessMemoryMap
mov dx,0x3F8 + 1
mov al,0x00
out dx,al
mov dx,0x3F8 + 3
mov al,0x80
out dx,al
mov dx,0x3F8 + 0
mov al,0x03
out dx,al
mov dx,0x3F8 + 1
mov al,0x00
out dx,al
mov dx,0x3F8 + 3
mov al,0x03
out dx,al
mov dx,0x3F8 + 2
mov al,0xC7
out dx,al
mov dx,0x3F8 + 4
mov al,0x0B
out dx,al
%endif
InstallIDT:
; Remap the ISRs sent by the PIC to 0x20 - 0x2F
; Even though we'll mask the PIC to use the APIC,
; we have to do this so that the spurious interrupts
; are set to a sane vector range.
mov al,0x11
out 0x20,al
mov al,0x11
out 0xA0,al
mov al,0x20
out 0x21,al
mov al,0x28
out 0xA1,al
mov al,0x04
out 0x21,al
mov al,0x02
out 0xA1,al
mov al,0x01
out 0x21,al
mov al,0x01
out 0xA1,al
mov al,0x00
out 0x21,al
mov al,0x00
out 0xA1,al
; Install the interrupt handlers ; Install the interrupt handlers
%macro INSTALL_INTERRUPT_HANDLER 1 %macro INSTALL_INTERRUPT_HANDLER 1
@ -181,78 +154,21 @@ InstallIDT:
mov rcx,processorGDTR mov rcx,processorGDTR
sgdt [rcx] sgdt [rcx]
MemoryCalculations:
; Work out basic information about the physical memory map we got from the bootloader
mov rax,bootloaderInformationOffset
mov rax,[rax]
mov rbx,physicalMemoryRegions
add [rbx],rax
mov rdi,0xFFFFFE0000060000 - 0x10
add rdi,rax
mov rsi,0xFFFFFE0000060000
add rsi,rax
xor rax,rax
xor r8,r8
.loop:
add rdi,0x10
mov r9,[rdi + 8]
shl r9,12
add r9,[rdi]
cmp r9,r8
jb .lower
mov r8,r9
.lower:
add rax,[rdi + 8]
cmp qword [rdi],0
jne .loop
mov rbx,[rdi + 8]
sub rax,rbx
sub rdi,rsi
shr rdi,4
mov rsi,physicalMemoryRegionsCount
mov [rsi],rdi
mov rsi,physicalMemoryRegionsPagesCount
mov [rsi],rax
mov rsi,physicalMemoryOriginalPagesCount
mov [rsi],rbx
mov rsi,physicalMemoryHighest
mov [rsi],r8
DisablePIC:
; Disable the PIC by masking all its interrupts, as we're going to use the APIC instead.
; For some reason, it'll still generate spurious interrupts, so we'll have to ignore those.
mov al,0xFF
out 0xA1,al
out 0x21,al
StartKernel:
; First stage of processor initilisation ; First stage of processor initilisation
call SetupProcessor1 call SetupProcessor1
; Print a divider line.
mov rdi,'-'
mov rcx,10
.line: call ProcessorDebugOutputByte
loop .line
mov rdi,10
call ProcessorDebugOutputByte
mov rdi,13
call ProcessorDebugOutputByte
; Call the KernelMain function ; Call the KernelMain function
and rsp,~0xF and rsp,~0xF
extern KernelMain
call KernelMain call KernelMain
ProcessorReady: ProcessorReady:
; Set the timer and become this CPU's idle thread. ; Set the timer and become this CPU's idle thread.
mov rdi,1 mov rdi,1
[extern ArchNextTimer]
call ArchNextTimer call ArchNextTimer
jmp ProcessorIdle jmp ProcessorIdle
SetupProcessor1: SetupProcessor1:
EnableCPUFeatures: .enable_cpu_features:
; Enable no-execute support, if available ; Enable no-execute support, if available
mov eax,0x80000001 mov eax,0x80000001
cpuid cpuid
@ -391,7 +307,7 @@ EnableCPUFeatures:
or eax,0x00010000 or eax,0x00010000
wrmsr wrmsr
SetupCPULocalStorage: .setup_cpu_local_storage:
mov ecx,0xC0000101 mov ecx,0xC0000101
mov rax,cpu_local_storage mov rax,cpu_local_storage
mov rdx,cpu_local_storage mov rdx,cpu_local_storage
@ -401,13 +317,13 @@ SetupCPULocalStorage:
add qword [rdi],32 ; Space for 4 8-byte values at gs:0 - gs:31 add qword [rdi],32 ; Space for 4 8-byte values at gs:0 - gs:31
wrmsr wrmsr
LoadIDTR: .load_idtr:
; Load the IDTR ; Load the IDTR
mov rax,idt mov rax,idt
lidt [rax] lidt [rax]
sti sti
EnableAPIC: .enable_apic:
; Enable the APIC! ; Enable the APIC!
; Since we're on AMD64, we know that the APIC will be present. ; Since we're on AMD64, we know that the APIC will be present.
mov ecx,0x1B mov ecx,0x1B
@ -453,7 +369,6 @@ SyscallEntry:
; Arguments in RDI, RSI, RDX, R8, R9. (RCX contains return address). ; Arguments in RDI, RSI, RDX, R8, R9. (RCX contains return address).
; Return value in RAX. ; Return value in RAX.
[extern Syscall]
mov rbx,rsp mov rbx,rsp
and rsp,~0xF and rsp,~0xF
call Syscall call Syscall
@ -476,19 +391,16 @@ SyscallEntry:
db 0x48 db 0x48
sysret sysret
[global ProcessorFakeTimerInterrupt]
ProcessorFakeTimerInterrupt: ProcessorFakeTimerInterrupt:
int 0x40 int 0x40
ret ret
[global ProcessorDisableInterrupts]
ProcessorDisableInterrupts: ProcessorDisableInterrupts:
mov rax,14 ; Still allow important IPIs to go through. mov rax,14 ; Still allow important IPIs to go through.
mov cr8,rax mov cr8,rax
sti ; TODO Where is this necessary? Is is a performance issue? sti ; TODO Where is this necessary? Is is a performance issue?
ret ret
[global ProcessorEnableInterrupts]
ProcessorEnableInterrupts: ProcessorEnableInterrupts:
; WARNING: Changing this mechanism also requires update in x86_64.cpp, when deciding if we should re-enable interrupts on exception. ; WARNING: Changing this mechanism also requires update in x86_64.cpp, when deciding if we should re-enable interrupts on exception.
mov rax,0 mov rax,0
@ -496,7 +408,6 @@ ProcessorEnableInterrupts:
sti ; TODO Where is this necessary? Is is a performance issue? sti ; TODO Where is this necessary? Is is a performance issue?
ret ret
[global ProcessorAreInterruptsEnabled]
ProcessorAreInterruptsEnabled: ProcessorAreInterruptsEnabled:
pushf pushf
pop rax pop rax
@ -515,60 +426,51 @@ ProcessorAreInterruptsEnabled:
; shr rax,9 ; shr rax,9
ret ret
[global ProcessorHalt]
ProcessorHalt: ProcessorHalt:
cli cli
hlt hlt
jmp ProcessorHalt jmp ProcessorHalt
[global ProcessorOut8]
ProcessorOut8: ProcessorOut8:
mov rdx,rdi mov rdx,rdi
mov rax,rsi mov rax,rsi
out dx,al out dx,al
ret ret
[global ProcessorIn8]
ProcessorIn8: ProcessorIn8:
mov rdx,rdi mov rdx,rdi
xor rax,rax xor rax,rax
in al,dx in al,dx
ret ret
[global ProcessorOut16]
ProcessorOut16: ProcessorOut16:
mov rdx,rdi mov rdx,rdi
mov rax,rsi mov rax,rsi
out dx,ax out dx,ax
ret ret
[global ProcessorIn16]
ProcessorIn16: ProcessorIn16:
mov rdx,rdi mov rdx,rdi
xor rax,rax xor rax,rax
in ax,dx in ax,dx
ret ret
[global ProcessorOut32]
ProcessorOut32: ProcessorOut32:
mov rdx,rdi mov rdx,rdi
mov rax,rsi mov rax,rsi
out dx,eax out dx,eax
ret ret
[global ProcessorIn32]
ProcessorIn32: ProcessorIn32:
mov rdx,rdi mov rdx,rdi
xor rax,rax xor rax,rax
in eax,dx in eax,dx
ret ret
[global ProcessorInvalidatePage]
ProcessorInvalidatePage: ProcessorInvalidatePage:
invlpg [rdi] invlpg [rdi]
ret ret
[global ProcessorInvalidateAllPages]
ProcessorInvalidateAllPages: ProcessorInvalidateAllPages:
; Toggle CR4.PGE to invalidate all TLB entries, including global entries. ; Toggle CR4.PGE to invalidate all TLB entries, including global entries.
mov rax,cr4 mov rax,cr4
@ -583,22 +485,18 @@ ProcessorIdle:
hlt hlt
jmp ProcessorIdle jmp ProcessorIdle
[global GetLocalStorage]
GetLocalStorage: GetLocalStorage:
mov rax,[gs:0] mov rax,[gs:0]
ret ret
[global GetCurrentThread]
GetCurrentThread: GetCurrentThread:
mov rax,[gs:16] mov rax,[gs:16]
ret ret
[global ProcessorSetLocalStorage]
ProcessorSetLocalStorage: ProcessorSetLocalStorage:
mov [gs:0],rdi mov [gs:0],rdi
ret ret
[global ProcessorSetThreadStorage]
ProcessorSetThreadStorage: ProcessorSetThreadStorage:
push rdx push rdx
push rcx push rcx
@ -718,7 +616,6 @@ ASMInterruptHandler:
mov rdi,rsp mov rdi,rsp
mov rbx,rsp mov rbx,rsp
and rsp,~0xF and rsp,~0xF
extern InterruptHandler
call InterruptHandler call InterruptHandler
mov rsp,rbx mov rsp,rbx
xor rax,rax xor rax,rax
@ -767,7 +664,6 @@ ReturnFromInterruptHandler:
add rsp,16 add rsp,16
iretq iretq
[global ProcessorSetAddressSpace]
ProcessorSetAddressSpace: ProcessorSetAddressSpace:
mov rdi,[rdi] mov rdi,[rdi]
mov rax,cr3 mov rax,cr3
@ -777,18 +673,14 @@ ProcessorSetAddressSpace:
.cont: .cont:
ret ret
[global ProcessorGetRSP]
ProcessorGetRSP: ProcessorGetRSP:
mov rax,rsp mov rax,rsp
ret ret
[global ProcessorGetRBP]
ProcessorGetRBP: ProcessorGetRBP:
mov rax,rbp mov rax,rbp
ret ret
[extern PostContextSwitch]
[global ArchSwitchContext]
ArchSwitchContext: ArchSwitchContext:
cli cli
mov [gs:16],rcx mov [gs:16],rcx
@ -804,12 +696,10 @@ ArchSwitchContext:
call PostContextSwitch call PostContextSwitch
jmp ReturnFromInterruptHandler jmp ReturnFromInterruptHandler
[global ProcessorReadCR3]
ProcessorReadCR3: ProcessorReadCR3:
mov rax,cr3 mov rax,cr3
ret ret
[global ProcessorDebugOutputByte]
ProcessorDebugOutputByte: ProcessorDebugOutputByte:
%ifdef COM_OUTPUT %ifdef COM_OUTPUT
mov dx,0x3F8 + 5 mov dx,0x3F8 + 5
@ -824,19 +714,16 @@ ProcessorDebugOutputByte:
%endif %endif
ret ret
[global ProcessorReadTimeStamp]
ProcessorReadTimeStamp: ProcessorReadTimeStamp:
rdtsc rdtsc
shl rdx,32 shl rdx,32
or rax,rdx or rax,rdx
ret ret
[global ProcessorFlushCodeCache]
ProcessorFlushCodeCache: ProcessorFlushCodeCache:
wbinvd wbinvd
ret ret
[global ProcessorReadMXCSR]
ProcessorReadMXCSR: ProcessorReadMXCSR:
mov rax,.buffer mov rax,.buffer
stmxcsr [rax] stmxcsr [rax]
@ -845,7 +732,6 @@ ProcessorReadMXCSR:
ret ret
.buffer: dq 0 .buffer: dq 0
[global ProcessorInstallTSS]
ProcessorInstallTSS: ProcessorInstallTSS:
push rbx push rbx
@ -875,7 +761,6 @@ ProcessorInstallTSS:
pop rbx pop rbx
ret ret
[global MMArchSafeCopy]
MMArchSafeCopy: MMArchSafeCopy:
call GetCurrentThread call GetCurrentThread
mov byte [rax + 0],1 ; see definition of Thread mov byte [rax + 0],1 ; see definition of Thread
@ -890,7 +775,6 @@ MMArchSafeCopy:
mov al,0 mov al,0
ret ret
[global ProcessorReset]
ProcessorReset: ProcessorReset:
in al,0x64 in al,0x64
test al,2 test al,2
@ -899,8 +783,6 @@ ProcessorReset:
out 0x64,al out 0x64,al
jmp $ jmp $
[global _KThreadTerminate]
[extern KThreadTerminate]
_KThreadTerminate: _KThreadTerminate:
sub rsp,8 sub rsp,8
jmp KThreadTerminate jmp KThreadTerminate
@ -921,10 +803,8 @@ SynchronizeTimeStampCounter:
shr rdx,32 shr rdx,32
wrmsr wrmsr
ret ret
[global timeStampCounterSynchronizationValue]
timeStampCounterSynchronizationValue: dq 0 timeStampCounterSynchronizationValue: dq 0
[global ProcessorAPStartup]
[bits 16] [bits 16]
ProcessorAPStartup: ; This function must be less than 4KB in length (see drivers/acpi.cpp) ProcessorAPStartup: ; This function must be less than 4KB in length (see drivers/acpi.cpp)
mov ax,0x1000 mov ax,0x1000
@ -966,14 +846,12 @@ ProcessorAPStartup: ; This function must be less than 4KB in length (see drivers
mov rsp,[0x10FD0] mov rsp,[0x10FD0]
call SetupProcessor1 call SetupProcessor1
call SynchronizeTimeStampCounter call SynchronizeTimeStampCounter
[extern SetupProcessor2]
mov rdi,[0x10FB0] mov rdi,[0x10FB0]
call SetupProcessor2 call SetupProcessor2
mov byte [0x10FC0],2 ; Indicate the BSP can start the next processor. mov byte [0x10FC0],2 ; Indicate the BSP can start the next processor.
and rsp,~0xF and rsp,~0xF
jmp ProcessorReady jmp ProcessorReady
[global gdt_data]
gdt_data: gdt_data:
.null_entry: dq 0 .null_entry: dq 0
.code_entry: dd 0xFFFF ; 0x08 .code_entry: dd 0xFFFF ; 0x08

990
arch/x86_pc.cpp Normal file
View File

@ -0,0 +1,990 @@
#include <arch/x86_pc.h>
extern "C" uint64_t ProcessorReadCR3();
// Registered handler for a message signaled interrupt (MSI) vector.
struct MSIHandler {
	KIRQHandler callback; // Called when the MSI fires; NULL slot is unregistered.
	void *context;        // Opaque pointer passed back to the callback.
};
// Registered handler for a legacy (line-based) IRQ.
struct IRQHandler {
	KIRQHandler callback;   // Called when the IRQ fires.
	void *context;          // Opaque pointer passed back to the callback.
	intptr_t line;          // The interrupt line this handler is attached to.
	KPCIDevice *pciDevice;  // Owning PCI device, if the IRQ belongs to one.
	const char *cOwnerName; // Human-readable owner, for diagnostics.
};
// IRQ line for each PCI slot/pin combination, as reported by ACPI interrupt routing.
uint8_t pciIRQLines[0x100 /* slots */][4 /* pins */];

MSIHandler msiHandlers[INTERRUPT_VECTOR_MSI_COUNT];
IRQHandler irqHandlers[0x40];
KSpinlock irqHandlersLock; // Also for msiHandlers.

// Shared with the assembly SMP startup code (SynchronizeTimeStampCounter).
extern volatile uint64_t timeStampCounterSynchronizationValue;

// Physical memory map produced by the bootloader; see PCProcessMemoryMap.
PhysicalMemoryRegion *physicalMemoryRegions;
size_t physicalMemoryRegionsCount;         // Number of usable entries in physicalMemoryRegions.
size_t physicalMemoryRegionsPagesCount;    // Pages still available for early allocation.
size_t physicalMemoryOriginalPagesCount;   // Total pages reported by the bootloader.
size_t physicalMemoryRegionsIndex;         // First region that may still contain free pages.
uintptr_t physicalMemoryHighest;           // One past the highest usable physical address.

uint32_t bootloaderID;                     // Set by the assembly entry point (_start).
uintptr_t bootloaderInformationOffset;     // Offset of the bootloader data area; see GetBootloaderInformationOffset.

// Spinlock since some drivers need to access it in IRQs (e.g. ACPICA).
KSpinlock pciConfigSpinlock;
KSpinlock ipiLock;
// Human-readable descriptions for CPU exception vectors 0x00-0x1F,
// indexed by vector number; used when reporting faults.
const char *const exceptionInformation[] = {
	"0x00: Divide Error (Fault)",
	"0x01: Debug Exception (Fault/Trap)",
	"0x02: Non-Maskable External Interrupt (Interrupt)",
	"0x03: Breakpoint (Trap)",
	"0x04: Overflow (Trap)",
	"0x05: BOUND Range Exceeded (Fault)",
	"0x06: Invalid Opcode (Fault)",
	"0x07: x87 Coprocessor Unavailable (Fault)",
	"0x08: Double Fault (Abort)",
	"0x09: x87 Coprocessor Segment Overrun (Fault)",
	"0x0A: Invalid TSS (Fault)",
	"0x0B: Segment Not Present (Fault)",
	"0x0C: Stack Protection (Fault)",
	"0x0D: General Protection (Fault)",
	"0x0E: Page Fault (Fault)",
	"0x0F: Reserved/Unknown",
	"0x10: x87 FPU Floating-Point Error (Fault)",
	"0x11: Alignment Check (Fault)",
	"0x12: Machine Check (Abort)",
	"0x13: SIMD Floating-Point Exception (Fault)",
	"0x14: Virtualization Exception (Fault)",
	"0x15: Reserved/Unknown",
	"0x16: Reserved/Unknown",
	"0x17: Reserved/Unknown",
	"0x18: Reserved/Unknown",
	"0x19: Reserved/Unknown",
	"0x1A: Reserved/Unknown",
	"0x1B: Reserved/Unknown",
	"0x1C: Reserved/Unknown",
	"0x1D: Reserved/Unknown",
	"0x1E: Reserved/Unknown",
	"0x1F: Reserved/Unknown",
};
// Read a local APIC register. reg is the register offset divided by 4
// (callers pass e.g. 0x380 >> 2), since the MMIO window is addressed as uint32_t[].
uint32_t LapicReadRegister(uint32_t reg) {
#ifdef ES_ARCH_X86_64
	return acpi.lapicAddress[reg]; // Mapped from the address reported by ACPI.
#else
	return ((volatile uint32_t *) LOCAL_APIC_BASE)[reg]; // Fixed mapping on x86_32.
#endif
}
// Write a local APIC register. reg is the register offset divided by 4;
// see LapicReadRegister for the addressing convention.
void LapicWriteRegister(uint32_t reg, uint32_t value) {
#ifdef ES_ARCH_X86_64
	acpi.lapicAddress[reg] = value; // Mapped from the address reported by ACPI.
#else
	((volatile uint32_t *) LOCAL_APIC_BASE)[reg] = value; // Fixed mapping on x86_32.
#endif
}
// Arm the local APIC timer to fire TIMER_INTERRUPT every `ms` milliseconds.
// 0x320 is the LVT timer register (bit 17 selects periodic mode);
// 0x380 is the initial count register, scaled by the calibrated ticks-per-ms.
void LapicNextTimer(size_t ms) {
	LapicWriteRegister(0x320 >> 2, TIMER_INTERRUPT | (1 << 17));
	LapicWriteRegister(0x380 >> 2, acpi.lapicTicksPerMs * ms);
}
// Signal end-of-interrupt by writing the local APIC EOI register (offset 0xB0).
void LapicEndOfInterrupt() {
	LapicWriteRegister(0xB0 >> 2, 0);
}
// Hand out one physical page straight from the bootloader's memory map.
// Used during early boot, before the page frame database exists.
// Panics if every region has been exhausted.
uintptr_t MMArchEarlyAllocatePage() {
	uintptr_t index = physicalMemoryRegionsIndex;

	// Skip over regions that have already been fully consumed.
	while (!physicalMemoryRegions[index].pageCount) {
		if (++index == physicalMemoryRegionsCount) {
			KernelPanic("MMArchEarlyAllocatePage - Expected more pages in physical regions.\n");
		}
	}

	// Take the first page of the region, and shrink the region to exclude it.
	PhysicalMemoryRegion *source = &physicalMemoryRegions[index];
	uintptr_t page = source->baseAddress;
	source->baseAddress += K_PAGE_SIZE;
	source->pageCount -= 1;
	physicalMemoryRegionsPagesCount -= 1;

	// Remember where we got to, so the next call resumes here.
	physicalMemoryRegionsIndex = index;
	return page;
}
// Transfer every remaining page from the bootloader's region list into the
// physical memory manager's free list. Returns the number of pages inserted,
// which becomes this memory's contribution to the commit limit.
uint64_t MMArchPopulatePageFrameDatabase() {
	uint64_t availablePages = 0;

	for (uintptr_t regionIndex = 0; regionIndex < physicalMemoryRegionsCount; regionIndex++) {
		PhysicalMemoryRegion *region = &physicalMemoryRegions[regionIndex];
		uintptr_t firstFrame = region->baseAddress >> K_PAGE_BITS;

		for (uintptr_t offset = 0; offset < region->pageCount; offset++) {
			MMPhysicalInsertFreePagesNext(firstFrame + offset);
		}

		availablePages += region->pageCount;
	}

	// Everything is now tracked by the page frame database.
	physicalMemoryRegionsPagesCount = 0;

	return availablePages;
}
// Returns one past the highest usable physical address, as computed by PCProcessMemoryMap.
uintptr_t MMArchGetPhysicalMemoryHighest() {
	return physicalMemoryHighest;
}
// Write a byte to an I/O port, then read from an unused port so slow legacy
// hardware (PIC, PIT, CMOS) has time to settle before the next access.
void ProcessorOut8Delayed(uint16_t port, uint8_t value) {
	ProcessorOut8(port, value);

	// Read an unused port to get a short delay.
	ProcessorIn8(IO_UNUSED_DELAY);
}
// Initialise the COM1 16550 UART for kernel debug output.
// Called from the assembly entry point; compiled out unless COM_OUTPUT is defined.
extern "C" void PCSetupCOM1() {
#ifdef COM_OUTPUT
	ProcessorOut8Delayed(IO_COM_1 + 1, 0x00); // Disable UART interrupts.
	ProcessorOut8Delayed(IO_COM_1 + 3, 0x80); // Set DLAB to program the baud divisor.
	ProcessorOut8Delayed(IO_COM_1 + 0, 0x03); // Divisor low byte: 3 -> 38400 baud.
	ProcessorOut8Delayed(IO_COM_1 + 1, 0x00); // Divisor high byte.
	ProcessorOut8Delayed(IO_COM_1 + 3, 0x03); // Clear DLAB; 8 data bits, no parity, 1 stop bit.
	ProcessorOut8Delayed(IO_COM_1 + 2, 0xC7); // Enable and clear FIFOs, 14-byte trigger level.
	ProcessorOut8Delayed(IO_COM_1 + 4, 0x0B); // DTR + RTS + OUT2.

	// Print a divider line.
	for (uint8_t i = 0; i < 10; i++) ProcessorDebugOutputByte('-');
	ProcessorDebugOutputByte('\r');
	ProcessorDebugOutputByte('\n');
#endif
}
// Reinitialise the two 8259 PICs, then mask every line, since the kernel uses
// the APIC instead. Called from the assembly entry point.
extern "C" void PCDisablePIC() {
	// Remap the ISRs sent by the PIC to 0x20 - 0x2F.
	// Even though we'll mask the PIC to use the APIC,
	// we have to do this so that the spurious interrupts are sent to a reasonable vector range.
	ProcessorOut8Delayed(IO_PIC_1_COMMAND, 0x11); // ICW1: begin initialisation, expect ICW4.
	ProcessorOut8Delayed(IO_PIC_2_COMMAND, 0x11);
	ProcessorOut8Delayed(IO_PIC_1_DATA, 0x20);    // ICW2: master vector offset 0x20.
	ProcessorOut8Delayed(IO_PIC_2_DATA, 0x28);    // ICW2: slave vector offset 0x28.
	ProcessorOut8Delayed(IO_PIC_1_DATA, 0x04);    // ICW3: slave on IRQ2.
	ProcessorOut8Delayed(IO_PIC_2_DATA, 0x02);    // ICW3: slave cascade identity.
	ProcessorOut8Delayed(IO_PIC_1_DATA, 0x01);    // ICW4: 8086 mode.
	ProcessorOut8Delayed(IO_PIC_2_DATA, 0x01);

	// Mask all interrupts.
	ProcessorOut8Delayed(IO_PIC_1_DATA, 0xFF);
	ProcessorOut8Delayed(IO_PIC_2_DATA, 0xFF);
}
// Parse the physical memory map left by the bootloader at offset 0x60000,
// filling in the physicalMemoryRegions* globals used by the early allocator.
// The region list ends with an entry whose baseAddress is 0; that terminator's
// pageCount field holds the bootloader's original total page count.
extern "C" void PCProcessMemoryMap() {
	physicalMemoryRegions = (PhysicalMemoryRegion *) (LOW_MEMORY_MAP_START + 0x60000 + bootloaderInformationOffset);

	for (uintptr_t i = 0; physicalMemoryRegions[i].baseAddress; i++) {
		PhysicalMemoryRegion region = physicalMemoryRegions[i];
		uint64_t end = region.baseAddress + (region.pageCount << K_PAGE_BITS);

#ifdef ES_BITS_32
		if (end > 0x100000000) {
			// The region extends above 4GB, which a 32-bit kernel cannot address.
			// Zero the stored entry (not the local copy, which would have no effect)
			// so the allocators skip it, but still count it so that
			// physicalMemoryRegionsCount continues to index the terminator entry below.
			physicalMemoryRegions[i].pageCount = 0;
			physicalMemoryRegionsCount++;
			continue;
		}
#endif

		physicalMemoryRegionsPagesCount += region.pageCount;
		if (end > physicalMemoryHighest) physicalMemoryHighest = end;
		physicalMemoryRegionsCount++;
	}

	// The terminator's pageCount stores the bootloader's original total.
	physicalMemoryOriginalPagesCount = physicalMemoryRegions[physicalMemoryRegionsCount].pageCount;
}
// Returns the offset of the bootloader data area, as saved by the assembly entry point.
uintptr_t GetBootloaderInformationOffset() {
	return bootloaderInformationOffset;
}
// Read from PCI configuration space using the legacy I/O mechanism
// (address port 0xCF8, data port 0xCFC). offset must be 4-byte aligned;
// size is the access width in bits (8, 16 or 32). Thread/IRQ-safe via pciConfigSpinlock.
uint32_t KPCIReadConfig(uint8_t bus, uint8_t device, uint8_t function, uint8_t offset, int size) {
	KSpinlockAcquire(&pciConfigSpinlock);
	EsDefer(KSpinlockRelease(&pciConfigSpinlock));
	if (offset & 3) KernelPanic("KPCIReadConfig - offset is not 4-byte aligned.\n");
	ProcessorOut32(IO_PCI_CONFIG, (uint32_t) (0x80000000 | (bus << 16) | (device << 11) | (function << 8) | offset));
	if (size == 8) return ProcessorIn8(IO_PCI_DATA);
	if (size == 16) return ProcessorIn16(IO_PCI_DATA);
	if (size == 32) return ProcessorIn32(IO_PCI_DATA);
	// Was "PCIController::ReadConfig" - updated to match the function's actual name.
	KernelPanic("KPCIReadConfig - Invalid size %d.\n", size);
	return 0;
}
// Write to PCI configuration space using the legacy I/O mechanism
// (address port 0xCF8, data port 0xCFC). offset must be 4-byte aligned;
// size is the access width in bits (8, 16 or 32). Thread/IRQ-safe via pciConfigSpinlock.
void KPCIWriteConfig(uint8_t bus, uint8_t device, uint8_t function, uint8_t offset, uint32_t value, int size) {
	KSpinlockAcquire(&pciConfigSpinlock);
	EsDefer(KSpinlockRelease(&pciConfigSpinlock));
	if (offset & 3) KernelPanic("KPCIWriteConfig - offset is not 4-byte aligned.\n");
	ProcessorOut32(IO_PCI_CONFIG, (uint32_t) (0x80000000 | (bus << 16) | (device << 11) | (function << 8) | offset));
	if (size == 8) ProcessorOut8(IO_PCI_DATA, value);
	else if (size == 16) ProcessorOut16(IO_PCI_DATA, value);
	else if (size == 32) ProcessorOut32(IO_PCI_DATA, value);
	// Was "PCIController::WriteConfig" - updated to match the function's actual name.
	else KernelPanic("KPCIWriteConfig - Invalid size %d.\n", size);
}
// Unmap a contiguous range of pages from an address space, optionally freeing the
// backing physical pages (MM_UNMAP_PAGES_FREE / MM_UNMAP_PAGES_FREE_COPIED) or
// releasing file-cache mappings (MM_UNMAP_PAGES_BALANCE_FILE). When balancing,
// unmapMaximum bounds the work done and *resumePosition records where to continue.
void MMArchUnmapPages(MMSpace *space, uintptr_t virtualAddressStart, uintptr_t pageCount, unsigned flags, size_t unmapMaximum, uintptr_t *resumePosition) {
	// We can't let anyone use the unmapped pages until they've been invalidated on all processors.
	// This also synchronises modified bit updating.
	KMutexAcquire(&pmm.pageFrameMutex);
	EsDefer(KMutexRelease(&pmm.pageFrameMutex));
	KMutexAcquire(&space->data.mutex);
	EsDefer(KMutexRelease(&space->data.mutex));
#ifdef ES_ARCH_X86_64
	uintptr_t tableBase = virtualAddressStart & 0x0000FFFFFFFFF000;
#else
	uintptr_t tableBase = virtualAddressStart & 0xFFFFF000;
#endif
	uintptr_t start = resumePosition ? *resumePosition : 0;
	// TODO Freeing newly empty page tables.
	// - What do we need to invalidate when we do this?
	for (uintptr_t i = start; i < pageCount; i++) {
		uintptr_t virtualAddress = (i << K_PAGE_BITS) + tableBase;
		// When an intermediate table is absent, skip the whole span it covers:
		// rewind i to that table's first page, then advance by the table's page span.
#ifdef ES_ARCH_X86_64
		if ((PAGE_TABLE_L4[virtualAddress >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 3)] & 1) == 0) {
			i -= (virtualAddress >> K_PAGE_BITS) % (1 << (ENTRIES_PER_PAGE_TABLE_BITS * 3));
			i += (1 << (ENTRIES_PER_PAGE_TABLE_BITS * 3));
			continue;
		}
		if ((PAGE_TABLE_L3[virtualAddress >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 2)] & 1) == 0) {
			i -= (virtualAddress >> K_PAGE_BITS) % (1 << (ENTRIES_PER_PAGE_TABLE_BITS * 2));
			i += (1 << (ENTRIES_PER_PAGE_TABLE_BITS * 2));
			continue;
		}
#endif
		if ((PAGE_TABLE_L2[virtualAddress >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 1)] & 1) == 0) {
			i -= (virtualAddress >> K_PAGE_BITS) % (1 << (ENTRIES_PER_PAGE_TABLE_BITS * 1));
			i += (1 << (ENTRIES_PER_PAGE_TABLE_BITS * 1));
			continue;
		}
		uintptr_t indexL1 = virtualAddress >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 0);
		uintptr_t translation = PAGE_TABLE_L1[indexL1];
		if (!(translation & 1)) {
			// The page wasn't mapped.
			continue;
		}
		bool copy = translation & (1 << 9); // Bit 9 marks copied pages; see MM_MAP_PAGE_COPIED in MMArchMapPage.
		if (copy && (flags & MM_UNMAP_PAGES_BALANCE_FILE) && (~flags & MM_UNMAP_PAGES_FREE_COPIED)) {
			// Ignore copied pages when balancing file mappings.
			continue;
		}
		if ((~translation & (1 << 5)) || (~translation & (1 << 6))) {
			// See MMArchMapPage for a discussion of why these bits must be set.
			KernelPanic("MMArchUnmapPages - Page found without accessed or dirty bit set (virtualAddress: %x, translation: %x).\n", 
					virtualAddress, translation);
		}
		PAGE_TABLE_L1[indexL1] = 0;
#ifdef ES_ARCH_X86_64
		uintptr_t physicalAddress = translation & 0x0000FFFFFFFFF000;
#else
		uintptr_t physicalAddress = translation & 0xFFFFF000;
#endif
		if ((flags & MM_UNMAP_PAGES_FREE) || ((flags & MM_UNMAP_PAGES_FREE_COPIED) && copy)) {
			MMPhysicalFree(physicalAddress, true);
		} else if (flags & MM_UNMAP_PAGES_BALANCE_FILE) {
			// It's safe to do this before page invalidation,
			// because the page fault handler is synchronised with the same mutexes acquired above.
			if (MMUnmapFilePage(physicalAddress >> K_PAGE_BITS)) {
				if (resumePosition) {
					if (!unmapMaximum--) {
						*resumePosition = i;
						break;
					}
				}
			}
		}
	}
	// Shoot down the TLB entries for the whole range on all processors.
	MMArchInvalidatePages(virtualAddressStart, pageCount);
}
// Map a single page, physicalAddress -> virtualAddress, in the given address space,
// allocating intermediate page tables as needed. Both addresses must be page aligned.
// Returns false only when the page was already mapped and MM_MAP_PAGE_IGNORE_IF_MAPPED
// was passed; any other conflicting mapping panics. See MM_MAP_PAGE_* for flags.
bool MMArchMapPage(MMSpace *space, uintptr_t physicalAddress, uintptr_t virtualAddress, unsigned flags) {
	// TODO Use the no-execute bit.
	if ((physicalAddress | virtualAddress) & (K_PAGE_SIZE - 1)) {
		KernelPanic("MMArchMapPage - Address not page aligned.\n");
	}
	// Sanity-check the page frame's state, if the page frame database exists yet.
	if (pmm.pageFrames && (physicalAddress >> K_PAGE_BITS) < pmm.pageFrameDatabaseCount) {
		if (pmm.pageFrames[physicalAddress >> K_PAGE_BITS].state != MMPageFrame::ACTIVE
				&& pmm.pageFrames[physicalAddress >> K_PAGE_BITS].state != MMPageFrame::UNUSABLE) {
			KernelPanic("MMArchMapPage - Physical page frame %x not marked as ACTIVE or UNUSABLE.\n", physicalAddress);
		}
	}
	if (!physicalAddress) {
		KernelPanic("MMArchMapPage - Attempt to map physical page 0.\n");
	} else if (!virtualAddress) {
		KernelPanic("MMArchMapPage - Attempt to map virtual page 0.\n");
	// Userland addresses can only be mapped into the currently loaded address space,
	// since the page tables are accessed through the recursive mapping of CR3.
#ifdef ES_ARCH_X86_64
	} else if (virtualAddress < 0xFFFF800000000000 && ProcessorReadCR3() != space->data.cr3) {
#else
	} else if (virtualAddress < 0xC0000000 && ProcessorReadCR3() != space->data.cr3) {
#endif
		KernelPanic("MMArchMapPage - Attempt to map page into other address space.\n");
	}
	bool acquireFrameLock = !(flags & (MM_MAP_PAGE_NO_NEW_TABLES | MM_MAP_PAGE_FRAME_LOCK_ACQUIRED));
	if (acquireFrameLock) KMutexAcquire(&pmm.pageFrameMutex);
	EsDefer(if (acquireFrameLock) KMutexRelease(&pmm.pageFrameMutex););
	bool acquireSpaceLock = ~flags & MM_MAP_PAGE_NO_NEW_TABLES;
	if (acquireSpaceLock) KMutexAcquire(&space->data.mutex);
	EsDefer(if (acquireSpaceLock) KMutexRelease(&space->data.mutex));
	// EsPrint("\tMap, %x -> %x\n", virtualAddress, physicalAddress);
	uintptr_t oldVirtualAddress = virtualAddress; // Kept un-masked for the TLB invalidation below.
#ifdef ES_ARCH_X86_64
	physicalAddress &= 0xFFFFFFFFFFFFF000;
	virtualAddress &= 0x0000FFFFFFFFF000;
#endif
	// Compute the index of the entry for this address at each level of the page tables.
#ifdef ES_ARCH_X86_64
	uintptr_t indexL4 = virtualAddress >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 3);
	uintptr_t indexL3 = virtualAddress >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 2);
#endif
	uintptr_t indexL2 = virtualAddress >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 1);
	uintptr_t indexL1 = virtualAddress >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 0);
	if (space != coreMMSpace && space != kernelMMSpace /* Don't check the kernel's space since the bootloader's tables won't be committed. */) {
#ifdef ES_ARCH_X86_64
		if (!(space->data.l3Commit[indexL4 >> 3] & (1 << (indexL4 & 7)))) KernelPanic("MMArchMapPage - Attempt to map using uncommitted L3 page table.\n");
		if (!(space->data.l2Commit[indexL3 >> 3] & (1 << (indexL3 & 7)))) KernelPanic("MMArchMapPage - Attempt to map using uncommitted L2 page table.\n");
#endif
		if (!(space->data.l1Commit[indexL2 >> 3] & (1 << (indexL2 & 7)))) KernelPanic("MMArchMapPage - Attempt to map using uncommitted L1 page table.\n");
	}
	// Allocate any missing intermediate page tables along the walk.
#ifdef ES_ARCH_X86_64
	if ((PAGE_TABLE_L4[indexL4] & 1) == 0) {
		if (flags & MM_MAP_PAGE_NO_NEW_TABLES) KernelPanic("MMArchMapPage - NO_NEW_TABLES flag set, but a table was missing.\n");
		PAGE_TABLE_L4[indexL4] = MMPhysicalAllocate(MM_PHYSICAL_ALLOCATE_LOCK_ACQUIRED) | 7;
		ProcessorInvalidatePage((uintptr_t) (PAGE_TABLE_L3 + indexL3)); // Not strictly necessary.
		EsMemoryZero((void *) ((uintptr_t) (PAGE_TABLE_L3 + indexL3) & ~(K_PAGE_SIZE - 1)), K_PAGE_SIZE);
		space->data.pageTablesActive++;
	}
	if ((PAGE_TABLE_L3[indexL3] & 1) == 0) {
		if (flags & MM_MAP_PAGE_NO_NEW_TABLES) KernelPanic("MMArchMapPage - NO_NEW_TABLES flag set, but a table was missing.\n");
		PAGE_TABLE_L3[indexL3] = MMPhysicalAllocate(MM_PHYSICAL_ALLOCATE_LOCK_ACQUIRED) | 7;
		ProcessorInvalidatePage((uintptr_t) (PAGE_TABLE_L2 + indexL2)); // Not strictly necessary.
		EsMemoryZero((void *) ((uintptr_t) (PAGE_TABLE_L2 + indexL2) & ~(K_PAGE_SIZE - 1)), K_PAGE_SIZE);
		space->data.pageTablesActive++;
	}
#endif
	if ((PAGE_TABLE_L2[indexL2] & 1) == 0) {
		if (flags & MM_MAP_PAGE_NO_NEW_TABLES) KernelPanic("MMArchMapPage - NO_NEW_TABLES flag set, but a table was missing.\n");
		PAGE_TABLE_L2[indexL2] = MMPhysicalAllocate(MM_PHYSICAL_ALLOCATE_LOCK_ACQUIRED) | 7;
		ProcessorInvalidatePage((uintptr_t) (PAGE_TABLE_L1 + indexL1)); // Not strictly necessary.
		EsMemoryZero((void *) ((uintptr_t) (PAGE_TABLE_L1 + indexL1) & ~(K_PAGE_SIZE - 1)), K_PAGE_SIZE);
		space->data.pageTablesActive++;
	}
	// Build the new L1 entry: bit 0 = present, bit 1 = writable.
	uintptr_t oldValue = PAGE_TABLE_L1[indexL1];
	uintptr_t value = physicalAddress | 3;
#ifdef ES_ARCH_X86_64
	if (flags & MM_MAP_PAGE_WRITE_COMBINING) value |= 16; // This only works because we modified the PAT in SetupProcessor1.
#else
	if (flags & MM_MAP_PAGE_WRITE_COMBINING) KernelPanic("MMArchMapPage - Write combining is unimplemented.\n"); // TODO.
#endif
	if (flags & MM_MAP_PAGE_NOT_CACHEABLE) value |= 24;
	if (flags & MM_MAP_PAGE_USER) value |= 7;
	else value |= 1 << 8; // Global.
	if (flags & MM_MAP_PAGE_READ_ONLY) value &= ~2;
	if (flags & MM_MAP_PAGE_COPIED) value |= 1 << 9; // Ignored by the CPU.
	// When the CPU accesses or writes to a page, 
	// it will modify the table entry to set the accessed or dirty bits respectively,
	// but it uses its TLB entry as the assumed previous value of the entry.
	// When unmapping pages we can't atomically remove an entry and do the TLB shootdown.
	// This creates a race condition:
	// 1. CPU 0 maps a page table entry. The dirty bit is not set.
	// 2. CPU 1 reads from the page. A TLB entry is created with the dirty bit not set.
	// 3. CPU 0 unmaps the entry.
	// 4. CPU 1 writes to the page. As the TLB entry has the dirty bit cleared, it sets the entry to its cached entry ORed with the dirty bit.
	// 5. CPU 0 invalidates the entry.
	// That is, CPU 1 didn't realize the page was unmapped when it wrote out its entry, so the page becomes mapped again.
	// To prevent this, we mark all pages with the dirty and accessed bits when we initially map them.
	// (We don't use these bits for anything, anyway. They're basically useless on SMP systems, as far as I can tell.)
	// That said, a CPU won't overwrite and clear a dirty bit when writing out its accessed flag (tested on Qemu);
	// see here https://stackoverflow.com/questions/69024372/.
	// Tl;dr: if a CPU ever sees an entry without these bits set, it can overwrite the entry with junk whenever it feels like it.
	// TODO Should we be marking page tables as dirty/accessed? (Including those made by the 32-bit AND 64-bit bootloader and MMArchInitialise).
	// When page table trimming is implemented, we'll probably need to do this.
	value |= (1 << 5) | (1 << 6);
	// Reject or tolerate a pre-existing mapping, depending on flags.
	if ((oldValue & 1) && !(flags & MM_MAP_PAGE_OVERWRITE)) {
		if (flags & MM_MAP_PAGE_IGNORE_IF_MAPPED) {
			return false;
		}
		if ((oldValue & ~(K_PAGE_SIZE - 1)) != physicalAddress) {
			KernelPanic("MMArchMapPage - Attempt to map %x to %x that has already been mapped to %x.\n", 
					virtualAddress, physicalAddress, oldValue & (~(K_PAGE_SIZE - 1)));
		}
		if (oldValue == value) {
			KernelPanic("MMArchMapPage - Attempt to rewrite page translation.\n", 
					physicalAddress, virtualAddress, oldValue & (K_PAGE_SIZE - 1), value & (K_PAGE_SIZE - 1));
		} else if (!(oldValue & 2) && (value & 2)) {
			// The page has become writable.
		} else {
			KernelPanic("MMArchMapPage - Attempt to change flags mapping %x address %x from %x to %x.\n", 
					physicalAddress, virtualAddress, oldValue & (K_PAGE_SIZE - 1), value & (K_PAGE_SIZE - 1));
		}
	}
	PAGE_TABLE_L1[indexL1] = value;
	// We rely on this page being invalidated on this CPU in some places.
	ProcessorInvalidatePage(oldVirtualAddress);
	return true;
}
// Set the writable bit on an existing mapping in the given address space.
// Returns false if any level of the page table walk is not present
// (i.e. the address is not mapped); returns true on success.
bool MMArchMakePageWritable(MMSpace *space, uintptr_t virtualAddress) {
	KMutexAcquire(&space->data.mutex);
	EsDefer(KMutexRelease(&space->data.mutex));

	// Round the address down to its page base.
#ifdef ES_ARCH_X86_64
	uintptr_t page = virtualAddress & 0x0000FFFFFFFFF000;
#else
	uintptr_t page = virtualAddress & 0xFFFFF000;
#endif

	// Walk down the page table levels, bailing out if any level is absent (present bit clear).
#ifdef ES_ARCH_X86_64
	if (!(PAGE_TABLE_L4[page >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 3)] & 1)) return false;
	if (!(PAGE_TABLE_L3[page >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 2)] & 1)) return false;
#endif
	if (!(PAGE_TABLE_L2[page >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 1)] & 1)) return false;

	uintptr_t entryIndex = page >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 0);
	if (!(PAGE_TABLE_L1[entryIndex] & 1)) return false;

	PAGE_TABLE_L1[entryIndex] |= 2; // Bit 1: read/write.
	return true;
}
// Architecture-specific initialisation of the memory manager:
// records CR3 for the core/kernel spaces, sets up the core region array,
// and (on x86_64) pre-allocates the kernel half's L4 entries and the l1Commit bitsets.
void MMArchInitialise() {
	coreMMSpace->data.cr3 = kernelMMSpace->data.cr3 = ProcessorReadCR3();

	mmCoreRegions[0].baseAddress = MM_CORE_SPACE_START;
	mmCoreRegions[0].pageCount = MM_CORE_SPACE_SIZE / K_PAGE_SIZE;

#ifdef ES_ARCH_X86_64
	// Ensure every kernel-half L4 entry (0x100-0x1FF) exists, so all address
	// spaces can share the same kernel page tables.
	for (uintptr_t i = 0x100; i < 0x200; i++) {
		if (PAGE_TABLE_L4[i] == 0) {
			// We don't need to commit anything because the PMM isn't ready yet.
			PAGE_TABLE_L4[i] = MMPhysicalAllocate(ES_FLAGS_DEFAULT) | 3;
			EsMemoryZero((void *) (PAGE_TABLE_L3 + i * 0x200), K_PAGE_SIZE);
		}
	}

	// The core space uses a statically allocated commit bitset; the kernel
	// space's bitset is reserved out of the core space.
	coreMMSpace->data.l1Commit = coreL1Commit;
	KMutexAcquire(&coreMMSpace->reserveMutex);
	kernelMMSpace->data.l1Commit = (uint8_t *) MMReserve(coreMMSpace, L1_COMMIT_SIZE_BYTES, MM_REGION_NORMAL | MM_REGION_NO_COMMIT_TRACKING | MM_REGION_FIXED)->baseAddress;
	KMutexRelease(&coreMMSpace->reserveMutex);
#endif
}
// Translate a virtual address to its mapped physical page address (page-aligned),
// or 0 if unmapped. If writeAccess is set, also return 0 when the mapping
// is present but not writable. Only walks the currently loaded page tables.
uintptr_t MMArchTranslateAddress(MMSpace *, uintptr_t virtualAddress, bool writeAccess) {
	// TODO This mutex will be necessary if we ever remove page tables.
	// space->data.mutex.Acquire();
	// EsDefer(space->data.mutex.Release());
#ifdef ES_ARCH_X86_64
	virtualAddress &= 0x0000FFFFFFFFF000;
	if ((PAGE_TABLE_L4[virtualAddress >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 3)] & 1) == 0) return 0;
	if ((PAGE_TABLE_L3[virtualAddress >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 2)] & 1) == 0) return 0;
#endif
	if ((PAGE_TABLE_L2[virtualAddress >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 1)] & 1) == 0) return 0;
	uintptr_t physicalAddress = PAGE_TABLE_L1[virtualAddress >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 0)];
	if (writeAccess && !(physicalAddress & 2)) return 0; // Bit 1: read/write.
	// Bit 0: present. Mask off the flag bits to return the page's physical base.
#ifdef ES_ARCH_X86_64
	return (physicalAddress & 1) ? (physicalAddress & 0x0000FFFFFFFFF000) : 0;
#else
	return (physicalAddress & 1) ? (physicalAddress & 0xFFFFF000) : 0;
#endif
}
// Locate the ACPI Root System Descriptor Pointer (RSDP).
// Returns its physical address, or 0 if it was not found.
// First checks the address handed over by the UEFI bootloader; otherwise
// searches the regions required by the ACPI specification: the first 1KB of
// the EBDA, and the BIOS area 0xE0000-0xFFFFF, on 16-byte boundaries.
uintptr_t ArchFindRootSystemDescriptorPointer() {
	uint64_t uefiRSDP = *((uint64_t *) (LOW_MEMORY_MAP_START + GetBootloaderInformationOffset() + 0x7FE8));

	if (uefiRSDP) {
		return uefiRSDP;
	}

	PhysicalMemoryRegion searchRegions[2];

	// The EBDA segment is stored in the BDA word at physical address 0x40E.
	// (Fixed: the previous code indexed a uint16_t pointer with 0x40E, which
	// read from byte offset 0x81C rather than 0x40E.)
	// NOTE pageCount is used here as a byte count, not a count of pages.
	searchRegions[0].baseAddress = (uintptr_t) ((*(uint16_t *) (LOW_MEMORY_MAP_START + 0x40E)) << 4) + LOW_MEMORY_MAP_START;
	searchRegions[0].pageCount = 0x400;
	searchRegions[1].baseAddress = (uintptr_t) 0xE0000 + LOW_MEMORY_MAP_START;
	searchRegions[1].pageCount = 0x20000;

	for (uintptr_t i = 0; i < 2; i++) {
		for (uintptr_t address = searchRegions[i].baseAddress;
				address < searchRegions[i].baseAddress + searchRegions[i].pageCount;
				address += 16) {
			RootSystemDescriptorPointer *rsdp = (RootSystemDescriptorPointer *) address;

			if (rsdp->signature != SIGNATURE_RSDP) {
				continue;
			}

			if (rsdp->revision == 0) {
				// ACPI 1.0: the checksum covers the first 20 bytes only.
				if (EsMemorySumBytes((uint8_t *) rsdp, 20)) {
					continue;
				}

				return (uintptr_t) rsdp - LOW_MEMORY_MAP_START;
			} else if (rsdp->revision == 2) {
				// ACPI 2.0+: the checksum covers the whole extended structure.
				if (EsMemorySumBytes((uint8_t *) rsdp, sizeof(RootSystemDescriptorPointer))) {
					continue;
				}

				return (uintptr_t) rsdp - LOW_MEMORY_MAP_START;
			}
		}
	}

	return 0;
}
// Return elapsed time in milliseconds, measured with PIT channel 0.
uint64_t ArchGetTimeFromPITMs() {
	// TODO This isn't working on real hardware, but ArchDelay1Ms is?

	// NOTE This will only work if called at least once every 50 ms.
	// (The PIT only stores a 16-bit counter, which is depleted every 50 ms.)
	static bool started = false;
	static uint64_t cumulative = 0, last = 0;

	if (!started) {
		// First call: program channel 0 (lo/hi byte access, mode 0) with the
		// maximum reload value, and start counting from here.
		ProcessorOut8(IO_PIT_COMMAND, 0x30);
		ProcessorOut8(IO_PIT_DATA, 0xFF);
		ProcessorOut8(IO_PIT_DATA, 0xFF);
		started = true;
		last = 0xFFFF;
		return 0;
	} else {
		// Latch and read the current 16-bit counter value of channel 0.
		ProcessorOut8(IO_PIT_COMMAND, 0x00);
		uint16_t x = ProcessorIn8(IO_PIT_DATA);
		x |= (ProcessorIn8(IO_PIT_DATA)) << 8;
		// Accumulate ticks elapsed since the previous read. If the counter
		// wrapped, the unsigned underflow of (last - x) plus the 0x10000
		// correction yields the true difference.
		cumulative += last - x;
		if (x > last) cumulative += 0x10000;
		last = x;
		// The PIT input clock is 1193182 Hz; convert ticks to milliseconds.
		return cumulative * 1000 / 1193182;
	}
}
// Spin for approximately 1ms using the PIT.
// Use only during initialisation; not thread-safe (reprograms channel 0,
// so it conflicts with ArchGetTimeFromPITMs).
void ArchDelay1Ms() {
	// Program channel 0 (lo/hi byte access, mode 0) with a reload value of
	// 0x04A9 = 1193 ticks, i.e. ~1ms at the PIT's 1193182 Hz input clock.
	ProcessorOut8(IO_PIT_COMMAND, 0x30);
	ProcessorOut8(IO_PIT_DATA, 0xA9);
	ProcessorOut8(IO_PIT_DATA, 0x04);

	while (true) {
		// Read-back command: latch the channel's status, then poll the
		// output pin state (bit 7) until the count has expired.
		ProcessorOut8(IO_PIT_COMMAND, 0xE2);

		if (ProcessorIn8(IO_PIT_DATA) & (1 << 7)) {
			break;
		}
	}
}
// Allocate the per-processor structures needed before a processor can be
// brought online, and register the processor with the scheduler.
NewProcessorStorage AllocateNewProcessorStorage(ArchCPU *archCPU) {
	NewProcessorStorage storage = {};
	storage.local = (CPULocalStorage *) EsHeapAllocate(sizeof(CPULocalStorage), true, K_FIXED);
#ifdef ES_ARCH_X86_64
	// One page holding the processor's GDT (filled in by SetupProcessor2) and TSS.
	storage.gdt = (uint32_t *) MMMapPhysical(kernelMMSpace, MMPhysicalAllocate(MM_PHYSICAL_ALLOCATE_COMMIT_NOW), K_PAGE_SIZE, ES_FLAGS_DEFAULT);
#endif
	// NOTE(review): storage.local is dereferenced without a null check —
	// presumably allocation failure this early in boot is unrecoverable; confirm.
	storage.local->archCPU = archCPU;
	archCPU->local = storage.local;
	scheduler.CreateProcessorThreads(storage.local);
	archCPU->kernelProcessorID = storage.local->processorID;
	return storage;
}
// Second-stage per-processor setup, run on the target processor itself:
// configures its LAPIC (NMI lines, LINT masks, timer), installs its
// CPU-local storage, and (on x86-64) its GDT and TSS.
void SetupProcessor2(NewProcessorStorage *storage) {
	// Setup the local interrupts for the current processor.

	for (uintptr_t i = 0; i < acpi.lapicNMICount; i++) {
		// Apply MADT NMI entries that target all processors (0xFF) or this one.
		if (acpi.lapicNMIs[i].processor == 0xFF
				|| acpi.lapicNMIs[i].processor == storage->local->archCPU->processorID) {
			// 0x350/0x360 are the LVT LINT0/LINT1 registers; lintIndex selects which.
			uint32_t registerIndex = (0x350 + (acpi.lapicNMIs[i].lintIndex << 4)) >> 2;
			uint32_t value = 2 | (1 << 10); // NMI exception interrupt vector.
			if (acpi.lapicNMIs[i].activeLow) value |= 1 << 13;
			if (acpi.lapicNMIs[i].levelTriggered) value |= 1 << 15;
			LapicWriteRegister(registerIndex, value);
		}
	}

	// Unmask LINT0 and LINT1 (clear bit 16), and set the task priority to 0
	// so no interrupts are blocked by priority.
	LapicWriteRegister(0x350 >> 2, LapicReadRegister(0x350 >> 2) & ~(1 << 16));
	LapicWriteRegister(0x360 >> 2, LapicReadRegister(0x360 >> 2) & ~(1 << 16));
	LapicWriteRegister(0x080 >> 2, 0);
	if (LapicReadRegister(0x30 >> 2) & 0x80000000) LapicWriteRegister(0x410 >> 2, 0);
	LapicEndOfInterrupt();

	// Configure the LAPIC's timer.

	LapicWriteRegister(0x3E0 >> 2, 2); // Divisor = 16

	// Create the processor's local storage.

	ProcessorSetLocalStorage(storage->local);

	// Setup a GDT and TSS for the processor.

#ifdef ES_ARCH_X86_64
	uint32_t *gdt = storage->gdt;
	// The bootstrap GDT's base address is read out of the GDTR image.
	void *bootstrapGDT = (void *) (((uint64_t *) ((uint16_t *) processorGDTR + 1))[0]);
	EsMemoryCopy(gdt, bootstrapGDT, 2048);
	// The TSS lives in the same page, immediately after the 2KB GDT copy.
	uint32_t *tss = (uint32_t *) ((uint8_t *) storage->gdt + 2048);
	storage->local->archCPU->kernelStack = (void **) (tss + 1);
	ProcessorInstallTSS(gdt, tss);
#endif
}
// Main architecture initialisation for the bootstrap processor: parses the
// ACPI tables, identifies the boot CPU, calibrates timers, and completes
// per-processor setup.
void ArchInitialise() {
	ACPIParseTables();

	// The LAPIC ID register (0x20) holds this processor's APIC ID in bits 24-31.
	uint8_t bootstrapLapicID = (LapicReadRegister(0x20 >> 2) >> 24);

	ArchCPU *currentCPU = nullptr;

	for (uintptr_t i = 0; i < acpi.processorCount; i++) {
		if (acpi.processors[i].apicID == bootstrapLapicID) {
			// That's us!
			currentCPU = acpi.processors + i;
			currentCPU->bootProcessor = true;
			break;
		}
	}

	if (!currentCPU) {
		KernelPanic("ArchInitialise - Could not find the bootstrap processor\n");
	}

	// Calibrate the LAPIC's timer and processor's timestamp counter.
	// Start the LAPIC timer from its maximum initial count (register 0x380),
	// spin for 8ms via the PIT, then read how far it counted down (0x390).
	ProcessorDisableInterrupts();
	uint64_t start = ProcessorReadTimeStamp();
	LapicWriteRegister(0x380 >> 2, (uint32_t) -1);
	for (int i = 0; i < 8; i++) ArchDelay1Ms(); // Average over 8ms
	acpi.lapicTicksPerMs = ((uint32_t) -1 - LapicReadRegister(0x390 >> 2)) >> 4;
	EsRandomAddEntropy(LapicReadRegister(0x390 >> 2));
	uint64_t end = ProcessorReadTimeStamp();
	timeStampTicksPerMs = (end - start) >> 3; // Divide the 8ms total by 8.
	ProcessorEnableInterrupts();
	// EsPrint("timeStampTicksPerMs = %d\n", timeStampTicksPerMs);

	// Finish processor initialisation.
	// This sets up interrupts, the timer, CPULocalStorage, the GDT and TSS,
	// and registers the processor with the scheduler.

	NewProcessorStorage storage = AllocateNewProcessorStorage(currentCPU);
	SetupProcessor2(&storage);
}
// Send an inter-processor interrupt to one processor (processorID != -1) or
// to every other scheduler-ready processor (processorID == -1).
// Returns the number of processors the IPI was *not* sent to.
size_t ProcessorSendIPI(uintptr_t interrupt, bool nmi, int processorID) {
	// It's possible that another CPU is trying to send an IPI at the same time we want to send the panic IPI.
	// TODO What should we do in this case?
	if (interrupt != KERNEL_PANIC_IPI) KSpinlockAssertLocked(&ipiLock);

	// Note: We send IPIs at a special priority that ProcessorDisableInterrupts doesn't mask.

	size_t ignored = 0;

	for (uintptr_t i = 0; i < acpi.processorCount; i++) {
		ArchCPU *processor = acpi.processors + i;

		if (processorID != -1) {
			// Targeted send: skip everything except the requested processor.
			if (processorID != processor->kernelProcessorID) {
				ignored++;
				continue;
			}
		} else {
			// Broadcast: skip ourselves and processors not yet running the scheduler.
			if (processor == GetLocalStorage()->archCPU || !processor->local || !processor->local->schedulerReady) {
				ignored++;
				continue;
			}
		}

		// Write the LAPIC interrupt command register: destination APIC ID in
		// ICR high (0x310), then vector/flags in ICR low (0x300), which
		// triggers the send. (1 << 14) asserts the level; 0x400 selects NMI
		// delivery mode.
		uint32_t destination = acpi.processors[i].apicID << 24;
		uint32_t command = interrupt | (1 << 14) | (nmi ? 0x400 : 0);
		LapicWriteRegister(0x310 >> 2, destination);
		LapicWriteRegister(0x300 >> 2, command);

		// Wait for the interrupt to be sent. (Bit 12 is the delivery status.)
		while (LapicReadRegister(0x300 >> 2) & (1 << 12));
	}

	return ignored;
}
// Ask the processor currently running `thread` to yield, and wait until the
// thread acknowledges receipt of the IPI.
void ProcessorSendYieldIPI(Thread *thread) {
	thread->receivedYieldIPI = false; // Cleared before sending so we can detect the acknowledgement.
	KSpinlockAcquire(&ipiLock);
	ProcessorSendIPI(YIELD_IPI, false);
	KSpinlockRelease(&ipiLock);
	while (!thread->receivedYieldIPI); // Spin until the thread gets the IPI.
}
// Arm the per-processor scheduler timer to fire in `ms` milliseconds.
void ArchNextTimer(size_t ms) {
	while (!scheduler.started); // Wait until the scheduler is ready.
	GetLocalStorage()->schedulerReady = true; // Make sure this CPU can be scheduled.
	LapicNextTimer(ms); // Set the next timer.
}
// Return the current time in milliseconds, preferring the HPET (x86-64)
// and falling back to the PIT.
uint64_t ArchGetTimeMs() {
	// Update the time stamp counter synchronization value.
	// The top bit is toggled on each update, with the TSC reading in the rest.
	timeStampCounterSynchronizationValue = ((timeStampCounterSynchronizationValue & 0x8000000000000000) 
			^ 0x8000000000000000) | ProcessorReadTimeStamp();

#ifdef ES_ARCH_X86_64
	if (acpi.hpetBaseAddress && acpi.hpetPeriod) {
		// HPET register 30 is the main counter; hpetPeriod is in femtoseconds,
		// so use 128-bit arithmetic to avoid overflow when converting to ms.
		__int128 fsToMs = 1000000000000;
		__int128 reading = acpi.hpetBaseAddress[30];
		return (uint64_t) (reading * (__int128) acpi.hpetPeriod / fsToMs);
	}
#endif

	return ArchGetTimeFromPITMs();
}
// Finish a context switch after the new thread's context has been selected:
// updates the kernel stack pointer, releases the scheduler lock, and (on
// x86-32) rearranges the saved frame for returns to ring 0.
// Returns true if this is the new thread's first time slice.
extern "C" bool PostContextSwitch(InterruptContext *context, MMSpace *oldAddressSpace) {
	Thread *currentThread = GetCurrentThread();

#ifdef ES_ARCH_X86_64
	// Publish the thread's kernel stack so the TSS/interrupt entry uses it.
	CPULocalStorage *local = GetLocalStorage();
	void *kernelStack = (void *) currentThread->kernelStack;
	*local->archCPU->kernelStack = kernelStack;
#endif

	bool newThread = currentThread->cpuTimeSlices == 1;

	LapicEndOfInterrupt();
	ContextSanityCheck(context);

	if (ProcessorAreInterruptsEnabled()) {
		KernelPanic("PostContextSwitch - Interrupts were enabled. (1)\n");
	}

#ifdef ES_ARCH_X86_64
	KernelLog(LOG_VERBOSE, "Arch", "context switch", "Context switch to %zthread %x at %x\n", newThread ? "new " : "", currentThread, context->rip);
	currentThread->lastKnownExecutionAddress = context->rip;
#else
	KernelLog(LOG_VERBOSE, "Arch", "context switch", "Context switch to %zthread %x at %x\n", newThread ? "new " : "", currentThread, context->eip);
	currentThread->lastKnownExecutionAddress = context->eip;
#endif

	if (scheduler.lock.interruptsEnabled) {
		KernelPanic("PostContextSwitch - Interrupts were enabled. (3)\n");
	}

	ProcessorSetThreadStorage(currentThread->tlsAddress);

	// We can only free the scheduler's spinlock when we are no longer using the stack
	// from the previous thread. See DoContextSwitch.
	// (Another CPU can KillThread this once it's back in activeThreads.)
	KSpinlockRelease(&scheduler.lock, true);

	if (ProcessorAreInterruptsEnabled()) {
		KernelPanic("PostContextSwitch - Interrupts were enabled. (2)\n");
	}

	MMSpaceCloseReference(oldAddressSpace);

#ifdef ES_ARCH_X86_32
	if (context->fromRing0) {
		// Returning to a kernel thread; we need to fix the stack.
		// (A same-ring iret does not pop esp/ss, so the saved fields are
		// rotated two slots to put eip/cs/flags where iret expects them.)
		uint32_t irq = context->esp;
		uint32_t errorCode = context->ss;
		context->ss = context->flags;
		context->esp = context->cs;
		context->flags = context->eip;
		context->cs = context->errorCode;
		context->eip = context->irq;
		context->irq = irq;
		context->errorCode = errorCode;
	}
#endif

	return newThread;
}
// Program the IOAPIC redirection entry for ISA IRQ `_line` so it is delivered
// to this processor at vector IRQ_BASE + _line. Idempotent per line.
// Returns false if no IOAPIC handles the line.
bool SetupInterruptRedirectionEntry(uintptr_t _line) {
	KSpinlockAssertLocked(&scheduler.lock);

	// Each line is only programmed once; remember which have been done.
	// NOTE(review): `1 << _line` assumes _line < 32 — KRegisterIRQ permits
	// line == 0x20, which would be an out-of-range shift; confirm callers.
	static uint32_t alreadySetup = 0;

	if (alreadySetup & (1 << _line)) {
		return true;
	}

	// Work out which interrupt the IoApic will send to the processor.
	// TODO Use the upper 4 bits for IRQ priority.

	uintptr_t line = _line;
	uintptr_t thisProcessorIRQ = line + IRQ_BASE;

	bool activeLow = false;
	bool levelTriggered = true;

	// If there was an interrupt override entry in the MADT table,
	// then we'll have to use that number instead.

	for (uintptr_t i = 0; i < acpi.interruptOverrideCount; i++) {
		ACPIInterruptOverride *interruptOverride = acpi.interruptOverrides + i;

		if (interruptOverride->sourceIRQ == line) {
			line = interruptOverride->gsiNumber;
			activeLow = interruptOverride->activeLow;
			levelTriggered = interruptOverride->levelTriggered;
			break;
		}
	}

	KernelLog(LOG_INFO, "Arch", "IRQ flags", "SetupInterruptRedirectionEntry - IRQ %d is active %z, %z triggered.\n",
			line, activeLow ? "low" : "high", levelTriggered ? "level" : "edge");

	ACPIIoApic *ioApic;
	bool foundIoApic = false;

	// Look for the IoApic to which this interrupt is sent.

	for (uintptr_t i = 0; i < acpi.ioapicCount; i++) {
		ioApic = acpi.ioApics + i;
		// IOAPIC register 1 reports the number of redirection entries in bits 16-23.
		if (line >= ioApic->gsiBase && line < (ioApic->gsiBase + (0xFF & (ACPIIoApicReadRegister(ioApic, 1) >> 16)))) {
			foundIoApic = true;
			line -= ioApic->gsiBase; // Make the line relative to this IOAPIC.
			break;
		}
	}

	// We couldn't find the IoApic that handles this interrupt.

	if (!foundIoApic) {
		KernelLog(LOG_ERROR, "Arch", "no IOAPIC", "SetupInterruptRedirectionEntry - Could not find an IOAPIC handling interrupt line %d.\n", line);
		return false;
	}

	// A normal priority interrupt.

	uintptr_t redirectionTableIndex = line * 2 + 0x10; // Redirection entries start at register 0x10, two registers each.
	uint32_t redirectionEntry = thisProcessorIRQ;
	if (activeLow) redirectionEntry |= (1 << 13);
	if (levelTriggered) redirectionEntry |= (1 << 15);

	// Send the interrupt to the processor that registered the interrupt.

	ACPIIoApicWriteRegister(ioApic, redirectionTableIndex, 1 << 16); // Mask the interrupt while we modify the entry.
	ACPIIoApicWriteRegister(ioApic, redirectionTableIndex + 1, GetLocalStorage()->archCPU->apicID << 24);
	ACPIIoApicWriteRegister(ioApic, redirectionTableIndex, redirectionEntry);

	alreadySetup |= 1 << _line;
	return true;
}
// Remove the MSI handler previously registered under `tag`.
void KUnregisterMSI(uintptr_t tag) {
	KSpinlockAcquire(&irqHandlersLock);
	msiHandlers[tag].callback = nullptr;
	KSpinlockRelease(&irqHandlersLock);
}
// Register an MSI handler in the first free vector slot.
// Returns the address/data pair the device should use, plus the tag for
// KUnregisterMSI, or a zeroed structure if every slot is taken.
KMSIInformation KRegisterMSI(KIRQHandler handler, void *context, const char *cOwnerName) {
	KSpinlockAcquire(&irqHandlersLock);
	EsDefer(KSpinlockRelease(&irqHandlersLock));

	// Find the first unused MSI vector.
	uintptr_t slot = 0;

	while (slot < INTERRUPT_VECTOR_MSI_COUNT && msiHandlers[slot].callback) {
		slot++;
	}

	if (slot == INTERRUPT_VECTOR_MSI_COUNT) {
		return {}; // All MSI vectors are in use.
	}

	msiHandlers[slot] = { handler, context };

	// TODO Selecting the best target processor.
	// Currently this sends everything to processor 0.

	KernelLog(LOG_INFO, "Arch", "register MSI", "Register MSI with vector %X for '%z'.\n", 
			INTERRUPT_VECTOR_MSI_START + slot, cOwnerName);

	return {
		.address = 0xFEE00000,
		.data = INTERRUPT_VECTOR_MSI_START + slot,
		.tag = slot,
	};
}
// Register an IRQ handler for a fixed interrupt line, or (line == -1, with a
// pciDevice) for the shared PCI interrupt lines 9/10/11.
// Returns false if the handler table is full or the line could not be routed.
bool KRegisterIRQ(intptr_t line, KIRQHandler handler, void *context, const char *cOwnerName, KPCIDevice *pciDevice) {
	KSpinlockAcquire(&scheduler.lock);
	EsDefer(KSpinlockRelease(&scheduler.lock));

	if (line == -1 && !pciDevice) {
		KernelPanic("KRegisterIRQ - Interrupt line is %d, and pciDevice is %x.\n", line, pciDevice);
	}

	// Save the handler callback and context.

	if (line > 0x20 || line < -1) KernelPanic("KRegisterIRQ - Unexpected IRQ %d\n", line);
	bool found = false;

	KSpinlockAcquire(&irqHandlersLock);

	// Take the first free slot in the handler table.
	for (uintptr_t i = 0; i < sizeof(irqHandlers) / sizeof(irqHandlers[0]); i++) {
		if (!irqHandlers[i].callback) {
			found = true;
			irqHandlers[i].callback = handler;
			irqHandlers[i].context = context;
			irqHandlers[i].line = line;
			irqHandlers[i].pciDevice = pciDevice;
			irqHandlers[i].cOwnerName = cOwnerName;
			break;
		}
	}

	KSpinlockRelease(&irqHandlersLock);

	if (!found) {
		KernelLog(LOG_ERROR, "Arch", "too many IRQ handlers", "The limit of IRQ handlers was reached (%d), and the handler for '%z' was not registered.\n",
				sizeof(irqHandlers) / sizeof(irqHandlers[0]), cOwnerName);
		return false;
	}

	KernelLog(LOG_INFO, "Arch", "register IRQ", "KRegisterIRQ - Registered IRQ %d to '%z'.\n", line, cOwnerName);

	if (line != -1) {
		if (!SetupInterruptRedirectionEntry(line)) {
			return false;
		}
	} else {
		// PCI devices may use any of lines 9, 10 and 11; route them all.
		SetupInterruptRedirectionEntry(9);
		SetupInterruptRedirectionEntry(10);
		SetupInterruptRedirectionEntry(11);
	}

	return true;
}

View File

@ -17,6 +17,7 @@
#define IO_PS2_COMMAND (0x0064) #define IO_PS2_COMMAND (0x0064)
#define IO_RTC_INDEX (0x0070) #define IO_RTC_INDEX (0x0070)
#define IO_RTC_DATA (0x0071) #define IO_RTC_DATA (0x0071)
#define IO_UNUSED_DELAY (0x0080)
#define IO_PIC_2_COMMAND (0x00A0) #define IO_PIC_2_COMMAND (0x00A0)
#define IO_PIC_2_DATA (0x00A1) #define IO_PIC_2_DATA (0x00A1)
#define IO_BGA_INDEX (0x01CE) #define IO_BGA_INDEX (0x01CE)
@ -47,9 +48,51 @@
#define IO_PCI_CONFIG (0x0CF8) #define IO_PCI_CONFIG (0x0CF8)
#define IO_PCI_DATA (0x0CFC) #define IO_PCI_DATA (0x0CFC)
// --------------------------------- Interrupt vectors.
// Interrupt vectors:
// 0x00 - 0x1F: CPU exceptions
// 0x20 - 0x2F: PIC (disabled, spurious)
// 0x30 - 0x4F: Timers and low-priority IPIs.
// 0x50 - 0x6F: APIC (standard)
// 0x70 - 0xAF: MSI
// 0xF0 - 0xFE: High-priority IPIs
// 0xFF: APIC (spurious interrupt)
#define TIMER_INTERRUPT (0x40)
#define YIELD_IPI (0x41)
#define IRQ_BASE (0x50)
#define CALL_FUNCTION_ON_ALL_PROCESSORS_IPI (0xF0)
#define TLB_SHOOTDOWN_IPI (0xF1)
#define KERNEL_PANIC_IPI (0) // NMIs ignore the interrupt vector.
#define INTERRUPT_VECTOR_MSI_START (0x70)
#define INTERRUPT_VECTOR_MSI_COUNT (0x40)
// --------------------------------- Forward declarations. // --------------------------------- Forward declarations.
struct NewProcessorStorage {
struct CPULocalStorage *local;
uint32_t *gdt;
};
uint8_t ACPIGetCenturyRegisterIndex(); uint8_t ACPIGetCenturyRegisterIndex();
uintptr_t GetBootloaderInformationOffset(); uintptr_t GetBootloaderInformationOffset();
extern "C" void ProcessorDebugOutputByte(uint8_t byte);
extern uintptr_t bootloaderInformationOffset;
uintptr_t ArchFindRootSystemDescriptorPointer();
void ArchStartupApplicationProcessors();
uint32_t LapicReadRegister(uint32_t reg);
void LapicWriteRegister(uint32_t reg, uint32_t value);
NewProcessorStorage AllocateNewProcessorStorage(struct ArchCPU *archCPU);
extern "C" void SetupProcessor2(struct NewProcessorStorage *);
void ArchDelay1Ms(); // Spin for approximately 1ms. Use only during initialisation. Not thread-safe.
uint64_t ArchGetTimeFromPITMs();
void *ACPIGetRSDP();
size_t ProcessorSendIPI(uintptr_t interrupt, bool nmi = false, int processorID = -1); // Returns the number of processors the IPI was *not* sent to.
void ArchSetPCIIRQLine(uint8_t slot, uint8_t pin, uint8_t line);
extern "C" void ProcessorReset();
void MMArchInvalidatePages(uintptr_t virtualAddressStart, uintptr_t pageCount);
void ContextSanityCheck(struct InterruptContext *context);
#endif #endif

View File

@ -245,7 +245,9 @@ void ACPIParseTables() {
uintptr_t startLength = length; uintptr_t startLength = length;
uint8_t *data = (uint8_t *) (madt + 1); uint8_t *data = (uint8_t *) (madt + 1);
#ifdef ES_ARCH_X86_64
acpi.lapicAddress = (uint32_t volatile *) ACPIMapPhysicalMemory(madt->lapicAddress, 0x10000); acpi.lapicAddress = (uint32_t volatile *) ACPIMapPhysicalMemory(madt->lapicAddress, 0x10000);
#endif
while (length && length <= startLength) { while (length && length <= startLength) {
uint8_t entryType = data[0]; uint8_t entryType = data[0];

View File

@ -281,7 +281,11 @@ bool AHCIController::Access(uintptr_t portIndex, uint64_t offsetBytes, size_t co
KDMASegment segment = KDMABufferNextSegment(buffer); KDMASegment segment = KDMABufferNextSegment(buffer);
prdt[0 + 4 * prdtEntryCount] = segment.physicalAddress; prdt[0 + 4 * prdtEntryCount] = segment.physicalAddress;
#ifdef ES_BITS_64
prdt[1 + 4 * prdtEntryCount] = segment.physicalAddress >> 32; prdt[1 + 4 * prdtEntryCount] = segment.physicalAddress >> 32;
#else
prdt[1 + 4 * prdtEntryCount] = 0;
#endif
prdt[2 + 4 * prdtEntryCount] = 0; prdt[2 + 4 * prdtEntryCount] = 0;
prdt[3 + 4 * prdtEntryCount] = (segment.byteCount - 1) | (segment.isLast ? (1 << 31) /* IRQ when done */ : 0); prdt[3 + 4 * prdtEntryCount] = (segment.byteCount - 1) | (segment.isLast ? (1 << 31) /* IRQ when done */ : 0);
@ -572,9 +576,14 @@ void AHCIController::Initialise() {
// Set the registers to the physical addresses. // Set the registers to the physical addresses.
WR_REGISTER_PCLB(i, physicalAddress); WR_REGISTER_PCLB(i, physicalAddress);
if (dma64Supported) WR_REGISTER_PCLBU(i, physicalAddress >> 32);
WR_REGISTER_PFB(i, (physicalAddress + 0x400)); WR_REGISTER_PFB(i, (physicalAddress + 0x400));
#ifdef ES_BITS_64
if (dma64Supported) WR_REGISTER_PCLBU(i, physicalAddress >> 32);
if (dma64Supported) WR_REGISTER_PFBU(i, (physicalAddress + 0x400) >> 32); if (dma64Supported) WR_REGISTER_PFBU(i, (physicalAddress + 0x400) >> 32);
#else
if (dma64Supported) WR_REGISTER_PCLBU(i, 0);
if (dma64Supported) WR_REGISTER_PFBU(i, 0);
#endif
// Point each command list entry to the corresponding command table. // Point each command list entry to the corresponding command table.
@ -583,7 +592,11 @@ void AHCIController::Initialise() {
for (uintptr_t j = 0; j < commandSlotCount; j++) { for (uintptr_t j = 0; j < commandSlotCount; j++) {
uintptr_t address = physicalAddress + COMMAND_LIST_SIZE + RECEIVED_FIS_SIZE + COMMAND_TABLE_SIZE * j; uintptr_t address = physicalAddress + COMMAND_LIST_SIZE + RECEIVED_FIS_SIZE + COMMAND_TABLE_SIZE * j;
commandList[j * 8 + 2] = address; commandList[j * 8 + 2] = address;
#ifdef ES_BITS_64
commandList[j * 8 + 3] = address >> 32; commandList[j * 8 + 3] = address >> 32;
#else
commandList[j * 8 + 3] = 0;
#endif
} }
// Reset the port. // Reset the port.
@ -725,7 +738,11 @@ void AHCIController::Initialise() {
uint32_t *prdt = (uint32_t *) (ports[i].commandTables + 0x80); uint32_t *prdt = (uint32_t *) (ports[i].commandTables + 0x80);
prdt[0] = identifyDataPhysical; prdt[0] = identifyDataPhysical;
#ifdef ES_BITS_64
prdt[1] = identifyDataPhysical >> 32; prdt[1] = identifyDataPhysical >> 32;
#else
prdt[1] = 0;
#endif
prdt[2] = 0; prdt[2] = 0;
prdt[3] = 0x200 - 1; prdt[3] = 0x200 - 1;

View File

@ -105,7 +105,7 @@ static EsError FindDirectoryEntryReferenceFromIndex(Volume *volume, uint8_t *buf
} }
} }
static bool ValidateDirectoryEntry(DirectoryEntry *entry) { static bool ValidateDirectoryEntry(Volume *volume, DirectoryEntry *entry) {
uint32_t checksum = entry->checksum; uint32_t checksum = entry->checksum;
entry->checksum = 0; entry->checksum = 0;
uint32_t calculated = CalculateCRC32(entry, sizeof(DirectoryEntry)); uint32_t calculated = CalculateCRC32(entry, sizeof(DirectoryEntry));
@ -129,6 +129,8 @@ static bool ValidateDirectoryEntry(DirectoryEntry *entry) {
ESFS_CHECK(data->count == entry->fileSize, "ValidateDirectoryEntry - Expected direct attribute to cover entire file."); ESFS_CHECK(data->count == entry->fileSize, "ValidateDirectoryEntry - Expected direct attribute to cover entire file.");
} }
} else if (attribute->type == ESFS_ATTRIBUTE_DIRECTORY) { } else if (attribute->type == ESFS_ATTRIBUTE_DIRECTORY) {
AttributeDirectory *directory = (AttributeDirectory *) attribute;
ESFS_CHECK(directory->indexRootBlock < volume->superblock.blockCount, "ValidateDirectoryEntry - Directory index root block outside volume.");
} else if (attribute->type == ESFS_ATTRIBUTE_FILENAME) { } else if (attribute->type == ESFS_ATTRIBUTE_FILENAME) {
AttributeFilename *filename = (AttributeFilename *) attribute; AttributeFilename *filename = (AttributeFilename *) attribute;
ESFS_CHECK(filename->length + 8 <= filename->size, "ValidateDirectoryEntry - Filename too long."); ESFS_CHECK(filename->length + 8 <= filename->size, "ValidateDirectoryEntry - Filename too long.");
@ -350,7 +352,7 @@ static void Sync(KNode *_directory, KNode *node) {
return; return;
} }
if (!ValidateDirectoryEntry((DirectoryEntry *) (blockBuffer + file->reference.offsetIntoBlock))) { if (!ValidateDirectoryEntry(volume, (DirectoryEntry *) (blockBuffer + file->reference.offsetIntoBlock))) {
return; return;
} }
@ -393,7 +395,7 @@ static EsError Enumerate(KNode *node) {
for (uint64_t j = 0; j < entriesInThisBlock; j++, reference.offsetIntoBlock += sizeof(DirectoryEntry)) { for (uint64_t j = 0; j < entriesInThisBlock; j++, reference.offsetIntoBlock += sizeof(DirectoryEntry)) {
DirectoryEntry *entry = (DirectoryEntry *) blockBuffer + j; DirectoryEntry *entry = (DirectoryEntry *) blockBuffer + j;
if (!ValidateDirectoryEntry(entry)) { if (!ValidateDirectoryEntry(volume, entry)) {
// Try the entries in the next block. // Try the entries in the next block.
break; break;
} }
@ -1635,7 +1637,7 @@ static bool CreateInternal(const char *name, size_t nameLength, EsNodeType type,
entry->checksum = 0; entry->checksum = 0;
entry->checksum = CalculateCRC32(entry, sizeof(DirectoryEntry)); entry->checksum = CalculateCRC32(entry, sizeof(DirectoryEntry));
if (!ValidateDirectoryEntry(entry)) KernelPanic("EsFS::CreateInternal - Created directory entry is invalid.\n"); if (!ValidateDirectoryEntry(volume, entry)) KernelPanic("EsFS::CreateInternal - Created directory entry is invalid.\n");
// Write the directory entry. // Write the directory entry.
@ -1670,7 +1672,7 @@ static EsError Move(KNode *_oldDirectory, KNode *_file, KNode *_newDirectory, co
file->entry.checksum = 0; file->entry.checksum = 0;
file->entry.checksum = CalculateCRC32(&file->entry, sizeof(DirectoryEntry)); file->entry.checksum = CalculateCRC32(&file->entry, sizeof(DirectoryEntry));
if (!ValidateDirectoryEntry(&file->entry)) KernelPanic("EsFS::Move - Existing entry is invalid.\n"); if (!ValidateDirectoryEntry(volume, &file->entry)) KernelPanic("EsFS::Move - Existing entry is invalid.\n");
uint8_t *buffers = (uint8_t *) EsHeapAllocate(superblock->blockSize * 2, true, K_FIXED); uint8_t *buffers = (uint8_t *) EsHeapAllocate(superblock->blockSize * 2, true, K_FIXED);
if (!buffers) return ES_ERROR_INSUFFICIENT_RESOURCES; if (!buffers) return ES_ERROR_INSUFFICIENT_RESOURCES;
@ -1727,7 +1729,7 @@ static EsError Load(KNode *_directory, KNode *_node, KNodeMetadata *, const void
DirectoryEntry *entry = (DirectoryEntry *) (blockBuffer + reference.offsetIntoBlock); DirectoryEntry *entry = (DirectoryEntry *) (blockBuffer + reference.offsetIntoBlock);
if (!ValidateDirectoryEntry(entry)) { if (!ValidateDirectoryEntry(directory->volume, entry)) {
return ES_ERROR_CORRUPT_DATA; return ES_ERROR_CORRUPT_DATA;
} }
@ -1788,7 +1790,7 @@ static EsError Scan(const char *name, size_t nameLength, KNode *_directory) {
} }
DirectoryEntry *entry = (DirectoryEntry *) (blockBuffer + reference.offsetIntoBlock); DirectoryEntry *entry = (DirectoryEntry *) (blockBuffer + reference.offsetIntoBlock);
if (!ValidateDirectoryEntry(entry)) return ES_ERROR_CORRUPT_DATA; if (!ValidateDirectoryEntry(volume, entry)) return ES_ERROR_CORRUPT_DATA;
if ((entry->nodeType == ESFS_NODE_TYPE_DIRECTORY && !FindAttribute(entry, ESFS_ATTRIBUTE_DIRECTORY)) if ((entry->nodeType == ESFS_NODE_TYPE_DIRECTORY && !FindAttribute(entry, ESFS_ATTRIBUTE_DIRECTORY))
|| (entry->nodeType == ESFS_NODE_TYPE_FILE && !FindAttribute(entry, ESFS_ATTRIBUTE_DATA))) { || (entry->nodeType == ESFS_NODE_TYPE_FILE && !FindAttribute(entry, ESFS_ATTRIBUTE_DATA))) {
@ -1907,7 +1909,7 @@ static bool Mount(Volume *volume, EsFileOffsetDifference *rootDirectoryChildren)
} }
DirectoryEntry *entry = (DirectoryEntry *) (blockBuffer + rootReference.offsetIntoBlock); DirectoryEntry *entry = (DirectoryEntry *) (blockBuffer + rootReference.offsetIntoBlock);
if (!ValidateDirectoryEntry(entry)) goto failure; if (!ValidateDirectoryEntry(volume, entry)) goto failure;
AttributeDirectory *directory = (AttributeDirectory *) FindAttribute(entry, ESFS_ATTRIBUTE_DIRECTORY); AttributeDirectory *directory = (AttributeDirectory *) FindAttribute(entry, ESFS_ATTRIBUTE_DIRECTORY);
if (!directory || !FindAttribute(entry, ESFS_ATTRIBUTE_DATA)) { if (!directory || !FindAttribute(entry, ESFS_ATTRIBUTE_DATA)) {

View File

@ -477,7 +477,7 @@ void NVMeController::Initialise() {
return; return;
} }
if (~capabilities & (1UL << 37)) { if (~capabilities & (1ULL << 37)) {
KernelLog(LOG_ERROR, "NVMe", "unsupported capabilities", "Controller does not support NVMe command set.\n"); KernelLog(LOG_ERROR, "NVMe", "unsupported capabilities", "Controller does not support NVMe command set.\n");
return; return;
} }

View File

@ -287,7 +287,11 @@ bool KPCIDevice::EnableMSI(KIRQHandler irqHandler, void *context, const char *cO
WriteConfig32(pointer + 4, msi.address & 0xFFFFFFFF); WriteConfig32(pointer + 4, msi.address & 0xFFFFFFFF);
if (control & (1 << 7)) { if (control & (1 << 7)) {
#ifdef ES_BITS_64
WriteConfig32(pointer + 8, msi.address >> 32); WriteConfig32(pointer + 8, msi.address >> 32);
#else
WriteConfig32(pointer + 8, 0);
#endif
WriteConfig16(pointer + 12, (ReadConfig16(pointer + 12) & 0x3800) | msi.data); WriteConfig16(pointer + 12, (ReadConfig16(pointer + 12) & 0x3800) | msi.data);
if (control & (1 << 8)) WriteConfig32(pointer + 16, 0); if (control & (1 << 8)) WriteConfig32(pointer + 16, 0);
} else { } else {

View File

@ -596,7 +596,7 @@ bool XHCIController::HandleIRQ() {
uint8_t completionCode = (dw2 >> 24) & 0xFF; uint8_t completionCode = (dw2 >> 24) & 0xFF;
KernelLog(LOG_VERBOSE, "xHCI", "got event", "Received event of type %d with code %d from %x.\n", KernelLog(LOG_VERBOSE, "xHCI", "got event", "Received event of type %d with code %d from %x.\n",
type, completionCode, (uintptr_t) dw0 | ((uintptr_t) dw1 << 32)); type, completionCode, (uint64_t) dw0 | ((uint64_t) dw1 << 32));
if (type == 32 /* transfer completion event */) { if (type == 32 /* transfer completion event */) {
uint8_t slotID = (dw3 >> 24) & 0xFF; uint8_t slotID = (dw3 >> 24) & 0xFF;

View File

@ -131,7 +131,6 @@ extern "C" {
void MMArchUnmapPages(MMSpace *space, uintptr_t virtualAddressStart, uintptr_t pageCount, unsigned flags, size_t unmapMaximum = 0, uintptr_t *resumePosition = nullptr); void MMArchUnmapPages(MMSpace *space, uintptr_t virtualAddressStart, uintptr_t pageCount, unsigned flags, size_t unmapMaximum = 0, uintptr_t *resumePosition = nullptr);
bool MMArchMakePageWritable(MMSpace *space, uintptr_t virtualAddress); bool MMArchMakePageWritable(MMSpace *space, uintptr_t virtualAddress);
bool MMArchHandlePageFault(uintptr_t address, uint32_t flags); bool MMArchHandlePageFault(uintptr_t address, uint32_t flags);
void MMArchInvalidatePages(uintptr_t virtualAddressStart, uintptr_t pageCount);
bool MMArchIsBufferInUserRange(uintptr_t baseAddress, size_t byteCount); bool MMArchIsBufferInUserRange(uintptr_t baseAddress, size_t byteCount);
bool MMArchSafeCopy(uintptr_t destinationAddress, uintptr_t sourceAddress, size_t byteCount); // Returns false if a page fault occured during the copy. bool MMArchSafeCopy(uintptr_t destinationAddress, uintptr_t sourceAddress, size_t byteCount); // Returns false if a page fault occured during the copy.
bool MMArchCommitPageTables(MMSpace *space, struct MMRegion *region); bool MMArchCommitPageTables(MMSpace *space, struct MMRegion *region);

View File

@ -2282,10 +2282,10 @@ void MMInitialise() {
KMutexRelease(&kernelMMSpace->reserveMutex); KMutexRelease(&kernelMMSpace->reserveMutex);
// 1 extra for the top page, then round up so the page bitset is byte-aligned. // 1 extra for the top page, then round up so the page bitset is byte-aligned.
pmm.pageFrameDatabaseCount = (MMArchGetPhysicalMemoryHighest() + (K_PAGE_SIZE << 3)) >> K_PAGE_BITS; uintptr_t pageFrameDatabaseCount = (MMArchGetPhysicalMemoryHighest() + (K_PAGE_SIZE << 3)) >> K_PAGE_BITS;
pmm.pageFrames = (MMPageFrame *) MMStandardAllocate(kernelMMSpace, pageFrameDatabaseCount * sizeof(MMPageFrame), MM_REGION_FIXED);
pmm.pageFrames = (MMPageFrame *) MMStandardAllocate(kernelMMSpace, pmm.pageFrameDatabaseCount * sizeof(MMPageFrame), MM_REGION_FIXED); pmm.freeOrZeroedPageBitset.Initialise(pageFrameDatabaseCount, true);
pmm.freeOrZeroedPageBitset.Initialise(pmm.pageFrameDatabaseCount, true); pmm.pageFrameDatabaseCount = pageFrameDatabaseCount; // Only set this after the database is ready, or it may be accessed mid-allocation!
MMPhysicalInsertFreePagesStart(); MMPhysicalInsertFreePagesStart();
uint64_t commitLimit = MMArchPopulatePageFrameDatabase(); uint64_t commitLimit = MMArchPopulatePageFrameDatabase();

View File

@ -396,7 +396,11 @@ Thread *Scheduler::SpawnThread(const char *cName, uintptr_t startAddress, uintpt
thread->handles = 2; thread->handles = 2;
// Allocate the thread's stacks. // Allocate the thread's stacks.
#if defined(ES_BITS_64)
uintptr_t kernelStackSize = userland ? 0x4000 /* 16KB */ : 0x10000 /* 64KB */; uintptr_t kernelStackSize = userland ? 0x4000 /* 16KB */ : 0x10000 /* 64KB */;
#elif defined(ES_BITS_32)
uintptr_t kernelStackSize = userland ? 0x3000 /* 12KB */ : 0x8000 /* 32KB */;
#endif
uintptr_t userStackReserve = userland ? 0x400000 /* 4MB */ : kernelStackSize; uintptr_t userStackReserve = userland ? 0x400000 /* 4MB */ : kernelStackSize;
uintptr_t userStackCommit = userland ? 0x20000 /* 128KB */ : 0; uintptr_t userStackCommit = userland ? 0x20000 /* 128KB */ : 0;
uintptr_t stack = 0, kernelStack = (uintptr_t) MMStandardAllocate(kernelMMSpace, kernelStackSize, MM_REGION_FIXED); uintptr_t stack = 0, kernelStack = (uintptr_t) MMStandardAllocate(kernelMMSpace, kernelStackSize, MM_REGION_FIXED);
@ -781,6 +785,7 @@ void Thread::SetAddressSpace(MMSpace *space) {
KSpinlockAcquire(&scheduler.lock); KSpinlockAcquire(&scheduler.lock);
MMSpace *oldSpace = temporaryAddressSpace ?: kernelMMSpace; MMSpace *oldSpace = temporaryAddressSpace ?: kernelMMSpace;
EsPrint("space = %x, oldSpace = %x\n", space, oldSpace);
temporaryAddressSpace = space; temporaryAddressSpace = space;
MMSpace *newSpace = space ?: kernelMMSpace; MMSpace *newSpace = space ?: kernelMMSpace;
MMSpaceOpenReference(newSpace); MMSpaceOpenReference(newSpace);

View File

@ -16,7 +16,6 @@ KMutex printLock;
#endif #endif
void DebugWriteCharacter(uintptr_t character); void DebugWriteCharacter(uintptr_t character);
extern "C" void ProcessorDebugOutputByte(uint8_t byte);
int KWaitKey(); int KWaitKey();
#if defined(ES_ARCH_X86_32) || defined(ES_ARCH_X86_64) #if defined(ES_ARCH_X86_32) || defined(ES_ARCH_X86_64)

View File

@ -74,6 +74,7 @@ typedef struct AttributeFilename {
typedef struct AttributeDirectory { typedef struct AttributeDirectory {
/* 0 */ uint16_t type; // ESFS_ATTRIBUTE_DIRECTORY. /* 0 */ uint16_t type; // ESFS_ATTRIBUTE_DIRECTORY.
/* 2 */ uint16_t size; // The size in bytes. Must be 8 byte aligned. /* 2 */ uint16_t size; // The size in bytes. Must be 8 byte aligned.
/* 4 */ uint8_t _unused0[4];
/* 8 */ uint64_t childNodes; // The number of child nodes in the directory. /* 8 */ uint64_t childNodes; // The number of child nodes in the directory.
/* 16 */ uint64_t indexRootBlock; // The block containing the root IndexVertex for the directory. /* 16 */ uint64_t indexRootBlock; // The block containing the root IndexVertex for the directory.
/* 24 */ uint64_t totalSize; // The sum of sizes of all the directory's children in bytes. /* 24 */ uint64_t totalSize; // The sum of sizes of all the directory's children in bytes.
@ -157,12 +158,14 @@ typedef struct Superblock {
/* 52 */ uint32_t checksum; // CRC-32 checksum of Superblock. /* 52 */ uint32_t checksum; // CRC-32 checksum of Superblock.
/* 56 */ uint8_t mounted; // Non-zero to indicate that the volume is mounted, or was not properly unmounted. /* 56 */ uint8_t mounted; // Non-zero to indicate that the volume is mounted, or was not properly unmounted.
/* 57 */ uint8_t _unused2[7];
/* 64 */ uint64_t blockSize; // The size of a block on the volume. /* 64 */ uint64_t blockSize; // The size of a block on the volume.
/* 72 */ uint64_t blockCount; // The number of blocks on the volume. /* 72 */ uint64_t blockCount; // The number of blocks on the volume.
/* 80 */ uint64_t blocksUsed; // The number of blocks that are in use. /* 80 */ uint64_t blocksUsed; // The number of blocks that are in use.
/* 88 */ uint32_t blocksPerGroup; // The number of blocks in a group. /* 88 */ uint32_t blocksPerGroup; // The number of blocks in a group.
/* 92 */ uint8_t _unused3[4];
/* 96 */ uint64_t groupCount; // The number of groups on the volume. /* 96 */ uint64_t groupCount; // The number of groups on the volume.
/* 104 */ uint64_t blocksPerGroupBlockBitmap; // The number of blocks used to a store a group's block bitmap. /* 104 */ uint64_t blocksPerGroupBlockBitmap; // The number of blocks used to a store a group's block bitmap.
/* 112 */ uint64_t gdtFirstBlock; // The first block in the group descriptor table. /* 112 */ uint64_t gdtFirstBlock; // The first block in the group descriptor table.

View File

@ -55,6 +55,6 @@ if [ ! -f "bin/good_compiler.txt" ]; then
fi fi
# Compile and run Build. # Compile and run Build.
gcc -o bin/build -g util/build.c -Wall -Wextra -Wno-format-security -Wno-format-overflow \ gcc -o bin/build -g util/build.c -pthread -DPARALLEL_BUILD -D${ES_TARGET-TARGET_X86_64} \
-Wno-missing-field-initializers -Wno-unused-function -Wno-format-truncation -pthread -DPARALLEL_BUILD \ -Wall -Wextra -Wno-format-security -Wno-format-overflow -Wno-missing-field-initializers -Wno-unused-function -Wno-format-truncation \
&& bin/build "$@" && bin/build "$@"

View File

@ -2,23 +2,20 @@
#define _GNU_SOURCE #define _GNU_SOURCE
#endif #endif
#if 0 #if defined(TARGET_X86_64)
#define TOOLCHAIN_PREFIX "x86_64-essence" #define TOOLCHAIN_PREFIX "x86_64-essence"
#define TARGET_NAME "x86_64" #define TARGET_NAME "x86_64"
#define TOOLCHAIN_HAS_RED_ZONE #define TOOLCHAIN_HAS_RED_ZONE
#define TOOLCHAIN_HAS_CSTDLIB #define TOOLCHAIN_HAS_CSTDLIB
#define QEMU_EXECUTABLE "qemu-system-x86_64" #define QEMU_EXECUTABLE "qemu-system-x86_64"
#else #elif defined(TARGET_X86_32)
#define TOOLCHAIN_PREFIX "i686-elf" #define TOOLCHAIN_PREFIX "i686-elf"
#define TARGET_NAME "x86_32" #define TARGET_NAME "x86_32"
#define QEMU_EXECUTABLE "qemu-system-i386" #define QEMU_EXECUTABLE "qemu-system-i386"
#else
#error Unknown target.
#endif #endif
#define WARNING_FLAGS \
" -Wall -Wextra -Wno-missing-field-initializers -Wno-pmf-conversions -Wno-frame-address -Wno-unused-function -Wno-format-truncation -Wno-invalid-offsetof "
#define WARNING_FLAGS_C \
" -Wall -Wextra -Wno-missing-field-initializers -Wno-unused-function -Wno-format-truncation -Wno-unused-parameter "
#include <stdint.h> #include <stdint.h>
#include <stdarg.h> #include <stdarg.h>
@ -355,11 +352,13 @@ void Compile(uint32_t flags, int partitionSize, const char *volumeLabel) {
} }
void BuildUtilities() { void BuildUtilities() {
#define WARNING_FLAGS " -Wall -Wextra -Wno-missing-field-initializers -Wno-unused-function -Wno-format-truncation -Wno-unused-parameter "
buildStartTimeStamp = time(NULL); buildStartTimeStamp = time(NULL);
#define BUILD_UTILITY(x, y, z) \ #define BUILD_UTILITY(x, y, z) \
if (CheckDependencies("Utilities." x)) { \ if (CheckDependencies("Utilities." x)) { \
if (!CallSystem("gcc -MMD util/" z x ".c -o bin/" x " -g -std=c2x " WARNING_FLAGS_C " " y)) { \ if (!CallSystem("gcc -MMD util/" z x ".c -o bin/" x " -g -std=c2x " WARNING_FLAGS " " y)) { \
ParseDependencies("bin/" x ".d", "Utilities." x, false); \ ParseDependencies("bin/" x ".d", "Utilities." x, false); \
} \ } \
} }
@ -1124,6 +1123,8 @@ void DoCommand(const char *l) {
BuildAndRun(OPTIMISE_ON, true /* compile */, false /* debug */, -1, LOG_NORMAL); BuildAndRun(OPTIMISE_ON, true /* compile */, false /* debug */, -1, LOG_NORMAL);
} else if (0 == strcmp(l, "d") || 0 == strcmp(l, "debug")) { } else if (0 == strcmp(l, "d") || 0 == strcmp(l, "debug")) {
BuildAndRun(OPTIMISE_OFF, true /* compile */, true /* debug */, EMULATOR_QEMU, LOG_NORMAL); BuildAndRun(OPTIMISE_OFF, true /* compile */, true /* debug */, EMULATOR_QEMU, LOG_NORMAL);
} else if (0 == strcmp(l, "dlv")) {
BuildAndRun(OPTIMISE_OFF, true /* compile */, true /* debug */, EMULATOR_QEMU, LOG_VERBOSE);
} else if (0 == strcmp(l, "d3") || 0 == strcmp(l, "debug-without-compile")) { } else if (0 == strcmp(l, "d3") || 0 == strcmp(l, "debug-without-compile")) {
BuildAndRun(OPTIMISE_OFF, false /* compile */, true /* debug */, EMULATOR_QEMU, LOG_NORMAL); BuildAndRun(OPTIMISE_OFF, false /* compile */, true /* debug */, EMULATOR_QEMU, LOG_NORMAL);
} else if (0 == strcmp(l, "v") || 0 == strcmp(l, "vbox")) { } else if (0 == strcmp(l, "v") || 0 == strcmp(l, "vbox")) {
@ -1550,7 +1551,7 @@ int main(int _argc, char **_argv) {
coloredOutput = isatty(STDERR_FILENO); coloredOutput = isatty(STDERR_FILENO);
if (argc == 1) { if (argc == 1) {
printf(ColorHighlight "Essence Build" ColorNormal "\nPress Ctrl-C to exit.\n"); printf(ColorHighlight "Essence Build" ColorNormal "\nPress Ctrl-C to exit.\nCross target is " ColorHighlight TARGET_NAME ColorNormal ".\n");
} }
systemLog = fopen("bin/system.log", "w"); systemLog = fopen("bin/system.log", "w");

View File

@ -1349,6 +1349,7 @@ int main(int argc, char **argv) {
bootUseVBE = !!atoi(s.value); bootUseVBE = !!atoi(s.value);
} else if (0 == strcmp(s.key, "Flag.COM_OUTPUT") && atoi(s.value)) { } else if (0 == strcmp(s.key, "Flag.COM_OUTPUT") && atoi(s.value)) {
strcat(commonAssemblyFlags, " -DCOM_OUTPUT "); strcat(commonAssemblyFlags, " -DCOM_OUTPUT ");
strcat(commonCompileFlags, " -DCOM_OUTPUT ");
} else if (0 == strcmp(s.key, "BuildCore.NoImportPOSIX")) { } else if (0 == strcmp(s.key, "BuildCore.NoImportPOSIX")) {
noImportPOSIX = !!atoi(s.value); noImportPOSIX = !!atoi(s.value);
} else if (0 == memcmp(s.key, "General.", 8)) { } else if (0 == memcmp(s.key, "General.", 8)) {