mirror of https://gitlab.com/nakst/essence
kernel cleanup 2
This commit is contained in:
parent
6fa375a9d8
commit
7a0b832c36
1138
drivers/acpi.cpp
1138
drivers/acpi.cpp
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,637 @@
|
|||
// TODO Warning: Not all of the OSL has been tested.

extern "C" {
// Fixed: "push" must be its own pragma issued before the "ignored" directive.
// The original line "#pragma GCC diagnostic ignored \"-Wunused-parameter\" push"
// was malformed, so the warning suppression was not scoped and the matching
// "pop" below unbalanced the diagnostic stack.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#include <ports/acpica/include/acpi.h>
#pragma GCC diagnostic pop
}
|
||||
|
||||
// A deferred callback queued by AcpiOsExecute; run on its own thread by RunACPICAEvent.
struct ACPICAEvent {
	ACPI_OSD_EXEC_CALLBACK function; // The ACPICA-supplied callback to invoke.
	void *context;                   // Opaque argument passed back to the callback.
};
|
||||
|
||||
// TODO Can these arrays be made smaller?
Thread *acpiEvents[256];                     // Threads spawned by AcpiOsExecute; joined in AcpiOsWaitEventsComplete.
size_t acpiEventCount;                       // Number of valid entries in acpiEvents.
ACPI_OSD_HANDLER acpiInterruptHandlers[256]; // Per-interrupt ACPICA handlers, indexed by interrupt level.
void *acpiInterruptContexts[256];            // Context pointer for each entry in acpiInterruptHandlers.
uint8_t acpicaPageBuffer[K_PAGE_SIZE];       // Scratch page for AcpiOsReadMemory/AcpiOsWriteMemory.
KMutex acpicaPageBufferMutex;                // Guards acpicaPageBuffer.
char acpiPrintf[4096];                       // Formatting buffer for AcpiOsPrintf/AcpiOsVprintf.
bool acpiOSLayerActive = false;              // Set between AcpiOsInitialize and AcpiOsTerminate.
|
||||
|
||||
// ACPICA startup hook: marks the OS layer active; panics on double initialisation.
ES_EXTERN_C ACPI_STATUS AcpiOsInitialize() {
	if (acpiOSLayerActive) KernelPanic("AcpiOsInitialize - ACPI has already been initialised.\n");
	acpiOSLayerActive = true;
	KernelLog(LOG_INFO, "ACPI", "initialise ACPICA", "AcpiOsInitialize - Initialising ACPICA OS layer...\n");
	return AE_OK;
}
|
||||
|
||||
// ACPICA shutdown hook: marks the OS layer inactive; panics if never initialised.
ES_EXTERN_C ACPI_STATUS AcpiOsTerminate() {
	if (!acpiOSLayerActive) KernelPanic("AcpiOsTerminate - ACPI has not been initialised.\n");
	acpiOSLayerActive = false;
	KernelLog(LOG_INFO, "ACPI", "terminate ACPICA", "AcpiOsTerminate - Terminating ACPICA OS layer...\n");
	return AE_OK;
}
|
||||
|
||||
// Locate the ACPI RSDP. Prefers the address the UEFI bootloader saved in its
// information block; otherwise falls back to ACPICA's legacy BIOS memory scan.
ES_EXTERN_C ACPI_PHYSICAL_ADDRESS AcpiOsGetRootPointer() {
	ACPI_PHYSICAL_ADDRESS address = 0;

	// NOTE(review): 0x7FE8 is assumed to be the offset at which the bootloader
	// stores the UEFI-provided RSDP pointer — confirm against the bootloader's
	// information-block layout.
	uint64_t uefiRSDP = *((uint64_t *) (LOW_MEMORY_MAP_START + GetBootloaderInformationOffset() + 0x7FE8));

	if (uefiRSDP) {
		return uefiRSDP;
	}

	AcpiFindRootPointer(&address);
	return address;
}
|
||||
|
||||
// Hook allowing the OS to override predefined ACPI namespace objects; we never do.
ES_EXTERN_C ACPI_STATUS AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *predefinedObject, ACPI_STRING *newValue) {
	(void) predefinedObject;
	if (!newValue) return AE_BAD_PARAMETER; // Added: guard the out-parameter, per the ACPICA OSL examples.
	*newValue = nullptr; // nullptr = keep the predefined value.
	return AE_OK;
}
|
||||
|
||||
// Hook allowing the OS to replace an ACPI table with a logical override; we never do.
ES_EXTERN_C ACPI_STATUS AcpiOsTableOverride(ACPI_TABLE_HEADER *existingTable, ACPI_TABLE_HEADER **newTable) {
	(void) existingTable;
	if (!newTable) return AE_BAD_PARAMETER; // Added: guard the out-parameter.
	*newTable = nullptr; // nullptr = keep the existing table.
	return AE_OK;
}
|
||||
|
||||
// Hook allowing the OS to replace an ACPI table with a physical override; we never do.
ES_EXTERN_C ACPI_STATUS AcpiOsPhysicalTableOverride(ACPI_TABLE_HEADER *existingTable, ACPI_PHYSICAL_ADDRESS *newAddress, uint32_t *newTableLength) {
	(void) existingTable;
	if (!newAddress || !newTableLength) return AE_BAD_PARAMETER; // Added: guard the out-parameters.
	*newAddress = 0; // 0 = keep the existing table.
	*newTableLength = 0;
	return AE_OK;
}
|
||||
|
||||
// Map a physical memory range into kernel virtual address space for ACPICA.
ES_EXTERN_C void *AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS physicalAddress, ACPI_SIZE length) {
	return ACPIMapPhysicalMemory(physicalAddress, length);
}
|
||||
|
||||
// Release a mapping created by AcpiOsMapMemory.
ES_EXTERN_C void AcpiOsUnmapMemory(void *address, ACPI_SIZE length) {
#ifdef ARCH_X86_COMMON
	// Addresses inside the permanent low-memory identity map were never
	// dynamically mapped, so there is nothing to free for them.
	if ((uintptr_t) address - (uintptr_t) LOW_MEMORY_MAP_START < (uintptr_t) LOW_MEMORY_LIMIT) {
		return;
	}
#endif

	(void) length; // MMFree knows the region size from its own bookkeeping.
	MMFree(kernelMMSpace, address);
}
|
||||
|
||||
// Translate a kernel virtual address to its physical address using the
// kernel address space's page tables.
ES_EXTERN_C ACPI_STATUS AcpiOsGetPhysicalAddress(void *virtualAddress, ACPI_PHYSICAL_ADDRESS *physicalAddress) {
	if (!virtualAddress) return AE_BAD_PARAMETER;
	if (!physicalAddress) return AE_BAD_PARAMETER;

	uintptr_t translated = MMArchTranslateAddress(kernelMMSpace, (uintptr_t) virtualAddress);
	*physicalAddress = translated;
	return AE_OK;
}
|
||||
|
||||
// Heap allocation for ACPICA: uninitialised memory from the fixed kernel heap.
ES_EXTERN_C void *AcpiOsAllocate(ACPI_SIZE size) {
	return EsHeapAllocate(size, false, K_FIXED);
}
|
||||
|
||||
// Free memory obtained from AcpiOsAllocate (size 0 = unknown to the caller).
ES_EXTERN_C void AcpiOsFree(void *memory) {
	EsHeapFree(memory, 0, K_FIXED);
}
|
||||
|
||||
// Ask whether memory is readable; only the (unused) ACPICA debugger calls
// this, so we unconditionally claim yes.
ES_EXTERN_C BOOLEAN AcpiOsReadable(void *memory, ACPI_SIZE length) {
	(void) memory;
	(void) length;
	// This is only used by the debugger, which we don't use...
	return TRUE;
}
|
||||
|
||||
// Ask whether memory is writable; only the (unused) ACPICA debugger calls
// this, so we unconditionally claim yes.
ES_EXTERN_C BOOLEAN AcpiOsWritable(void *memory, ACPI_SIZE length) {
	(void) memory;
	(void) length;
	// This is only used by the debugger, which we don't use...
	return TRUE;
}
|
||||
|
||||
// ACPICA requires a nonzero thread ID, so offset the kernel's IDs by 1.
ES_EXTERN_C ACPI_THREAD_ID AcpiOsGetThreadId() {
	return GetCurrentThread()->id + 1;
}
|
||||
|
||||
// Thread entry point for AcpiOsExecute: run the deferred callback, free its
// bookkeeping, then terminate this thread.
void RunACPICAEvent(void *e) {
	ACPICAEvent *event = (ACPICAEvent *) e;
	event->function(event->context);
	EsHeapFree(event, 0, K_FIXED);
	scheduler.TerminateThread(GetCurrentThread());
}
|
||||
|
||||
// Queue a callback for deferred execution on a newly spawned thread.
// The thread handle is recorded so AcpiOsWaitEventsComplete can join it.
ES_EXTERN_C ACPI_STATUS AcpiOsExecute(ACPI_EXECUTE_TYPE type, ACPI_OSD_EXEC_CALLBACK function, void *context) {
	(void) type;

	if (!function) return AE_BAD_PARAMETER;

	// Check capacity before allocating anything (was previously checked only
	// after the allocation and thread spawn).
	if (acpiEventCount == 256) {
		KernelPanic("AcpiOsExecute - Exceeded maximum event count, 256.\n");
	}

	ACPICAEvent *event = (ACPICAEvent *) EsHeapAllocate(sizeof(ACPICAEvent), true, K_FIXED);
	if (!event) return AE_NO_MEMORY; // Fixed: the allocation was previously dereferenced unchecked.
	event->function = function;
	event->context = context;

	Thread *thread = scheduler.SpawnThread("ACPICAEvent", (uintptr_t) RunACPICAEvent, (uintptr_t) event);

	if (!thread) {
		EsHeapFree(event, 0, K_FIXED); // Fixed: the event leaked when thread creation failed.
		return AE_NO_MEMORY;
	}

	acpiEvents[acpiEventCount++] = thread;
	return AE_OK;
}
|
||||
|
||||
// Block the calling thread for 'ms' milliseconds by waiting on an event that
// is never signalled, so the wait always ends by timeout.
ES_EXTERN_C void AcpiOsSleep(UINT64 ms) {
	KEvent event = {};
	KEventWait(&event, ms);
}
|
||||
|
||||
// Busy-wait for 'mcs' microseconds. ACPICA calls this from contexts where
// sleeping is not allowed, so we spin on the timestamp counter.
ES_EXTERN_C void AcpiOsStall(UINT32 mcs) {
	// Fixed: removed the dead "(void) mcs" cast (mcs is used below), and
	// multiply before dividing — the original computed
	// timeStampTicksPerMs / 1000 first, which truncates the per-microsecond
	// rate and under-stalls when the remainder is significant.
	uint64_t start = ProcessorReadTimeStamp();
	uint64_t end = start + ((uint64_t) mcs * timeStampTicksPerMs) / 1000;
	while (ProcessorReadTimeStamp() < end);
}
|
||||
|
||||
// Wait for every thread spawned by AcpiOsExecute to exit, then drop our
// handles to them and reset the queue.
ES_EXTERN_C void AcpiOsWaitEventsComplete() {
	for (uintptr_t i = 0; i < acpiEventCount; i++) {
		Thread *thread = acpiEvents[i];
		KEventWait(&thread->killedEvent, ES_WAIT_NO_TIMEOUT); // Signalled when the thread terminates.
		CloseHandleToObject(thread, KERNEL_OBJECT_THREAD);
	}

	acpiEventCount = 0;
}
|
||||
|
||||
// Create a counting semaphore for ACPICA with the given initial count.
// The maximum unit count is stashed in _custom and enforced in
// AcpiOsSignalSemaphore.
ES_EXTERN_C ACPI_STATUS AcpiOsCreateSemaphore(UINT32 maxUnits, UINT32 initialUnits, ACPI_SEMAPHORE *handle) {
	if (!handle) return AE_BAD_PARAMETER;

	KSemaphore *semaphore = (KSemaphore *) EsHeapAllocate(sizeof(KSemaphore), true, K_FIXED);
	if (!semaphore) return AE_NO_MEMORY; // Fixed: previously dereferenced without checking for OOM.
	KSemaphoreReturn(semaphore, initialUnits);
	semaphore->_custom = maxUnits;
	*handle = semaphore;
	return AE_OK;
}
|
||||
|
||||
// Destroy a semaphore created by AcpiOsCreateSemaphore.
ES_EXTERN_C ACPI_STATUS AcpiOsDeleteSemaphore(ACPI_SEMAPHORE handle) {
	if (!handle) return AE_BAD_PARAMETER;
	EsHeapFree(handle, sizeof(KSemaphore), K_FIXED);
	return AE_OK;
}
|
||||
|
||||
// Take 'units' from the semaphore, waiting up to 'timeout' milliseconds.
// Per the ACPICA interface, a timeout of 0xFFFF means wait forever.
ES_EXTERN_C ACPI_STATUS AcpiOsWaitSemaphore(ACPI_SEMAPHORE handle, UINT32 units, UINT16 timeout) {
	// Fixed: removed the dead "(void) timeout" cast — timeout is used below.
	if (!handle) return AE_BAD_PARAMETER;
	KSemaphore *semaphore = (KSemaphore *) handle;

	if (KSemaphoreTake(semaphore, units, timeout == (UINT16) -1 ? ES_WAIT_NO_TIMEOUT : timeout)) {
		return AE_OK;
	} else {
		return AE_TIME; // Timed out before the units became available.
	}
}
|
||||
|
||||
// Return 'units' to the semaphore, enforcing the cap recorded at creation.
ES_EXTERN_C ACPI_STATUS AcpiOsSignalSemaphore(ACPI_SEMAPHORE handle, UINT32 units) {
	if (!handle) return AE_BAD_PARAMETER;
	KSemaphore *semaphore = (KSemaphore *) handle;
	// NOTE(review): this limit check reads semaphore->units without holding
	// the semaphore's internal lock — confirm callers serialise signal/wait,
	// otherwise the check can race with a concurrent take/return.
	if (semaphore->units + units > semaphore->_custom) return AE_LIMIT;
	KSemaphoreReturn(semaphore, units);
	return AE_OK;
}
|
||||
|
||||
// Create a spinlock for ACPICA.
ES_EXTERN_C ACPI_STATUS AcpiOsCreateLock(ACPI_SPINLOCK *handle) {
	if (!handle) return AE_BAD_PARAMETER;
	KSpinlock *spinlock = (KSpinlock *) EsHeapAllocate(sizeof(KSpinlock), true, K_FIXED);
	if (!spinlock) return AE_NO_MEMORY; // Fixed: OOM previously handed ACPICA a null lock.
	*handle = spinlock;
	return AE_OK;
}
|
||||
|
||||
// Destroy a spinlock created by AcpiOsCreateLock.
ES_EXTERN_C void AcpiOsDeleteLock(ACPI_HANDLE handle) {
	EsHeapFree(handle, sizeof(KSpinlock), K_FIXED);
}
|
||||
|
||||
// Acquire an ACPICA spinlock. The returned CPU-flags value is unused here
// (always 0); KSpinlockAcquire handles interrupt state itself.
ES_EXTERN_C ACPI_CPU_FLAGS AcpiOsAcquireLock(ACPI_SPINLOCK handle) {
	KSpinlock *spinlock = (KSpinlock *) handle;
	KSpinlockAcquire(spinlock);
	return 0;
}
|
||||
|
||||
// Release a spinlock taken by AcpiOsAcquireLock; the flags value is ignored.
ES_EXTERN_C void AcpiOsReleaseLock(ACPI_SPINLOCK handle, ACPI_CPU_FLAGS flags) {
	(void) flags;
	KSpinlock *spinlock = (KSpinlock *) handle;
	KSpinlockRelease(spinlock);
}
|
||||
|
||||
// IRQ dispatch shim: forward the interrupt to the ACPICA handler registered
// for this index, if any. Returns true when the handler reports it handled.
bool ACPIInterrupt(uintptr_t interruptIndex, void *) {
	ACPI_OSD_HANDLER handler = acpiInterruptHandlers[interruptIndex];
	if (!handler) return false;
	return handler(acpiInterruptContexts[interruptIndex]) == ACPI_INTERRUPT_HANDLED;
}
|
||||
|
||||
// Register an ACPICA interrupt handler (typically for the SCI) and hook the
// corresponding IRQ to our ACPIInterrupt dispatcher.
ES_EXTERN_C ACPI_STATUS AcpiOsInstallInterruptHandler(UINT32 interruptLevel, ACPI_OSD_HANDLER handler, void *context) {
	// Fixed off-by-one: the handler arrays have 256 entries (indices 0-255),
	// so interruptLevel == 256 must be rejected too.
	if (interruptLevel >= 256 || !handler) return AE_BAD_PARAMETER;

	if (acpiInterruptHandlers[interruptLevel]) {
		return AE_ALREADY_EXISTS;
	}

	acpiInterruptHandlers[interruptLevel] = handler;
	acpiInterruptContexts[interruptLevel] = context;

	return KRegisterIRQ(interruptLevel, ACPIInterrupt, nullptr, "ACPICA") ? AE_OK : AE_ERROR;
}
|
||||
|
||||
// Unregister an ACPICA interrupt handler previously installed with
// AcpiOsInstallInterruptHandler. The handler must match the installed one.
ES_EXTERN_C ACPI_STATUS AcpiOsRemoveInterruptHandler(UINT32 interruptNumber, ACPI_OSD_HANDLER handler) {
	// Fixed off-by-one: valid array indices are 0-255.
	if (interruptNumber >= 256 || !handler) return AE_BAD_PARAMETER;

	if (!acpiInterruptHandlers[interruptNumber]) {
		return AE_NOT_EXIST;
	}

	if (handler != acpiInterruptHandlers[interruptNumber]) {
		return AE_BAD_PARAMETER;
	}

	acpiInterruptHandlers[interruptNumber] = nullptr;
	acpiInterruptContexts[interruptNumber] = nullptr; // Also clear the stale context pointer.

	return AE_OK;
}
|
||||
|
||||
// Read a value of the given bit width from a physical address.
// The containing physical page is copied into a scratch buffer first;
// the buffer and its mutex are shared with AcpiOsWriteMemory.
ES_EXTERN_C ACPI_STATUS AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS address, UINT64 *value, UINT32 width) {
	KMutexAcquire(&acpicaPageBufferMutex);
	EsDefer(KMutexRelease(&acpicaPageBufferMutex));

	uintptr_t page = (uintptr_t) address & ~(K_PAGE_SIZE - 1);
	uintptr_t offset = (uintptr_t) address & (K_PAGE_SIZE - 1);

	PMRead(page, acpicaPageBuffer, 1);
	uint8_t *source = acpicaPageBuffer + offset;

	switch (width) {
		case 64: *value = *(uint64_t *) source; break;
		case 32: *value = *(uint32_t *) source; break;
		case 16: *value = *(uint16_t *) source; break;
		default: *value = *source; break; // Any other width is treated as a byte read.
	}

	return AE_OK;
}
|
||||
|
||||
// Write a value of the given bit width to a physical address, via a
// read-modify-write of the containing page through the shared scratch buffer.
ES_EXTERN_C ACPI_STATUS AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS address, UINT64 value, UINT32 width) {
	KMutexAcquire(&acpicaPageBufferMutex);
	EsDefer(KMutexRelease(&acpicaPageBufferMutex));

	uintptr_t page = (uintptr_t) address & ~(K_PAGE_SIZE - 1);
	uintptr_t offset = (uintptr_t) address & (K_PAGE_SIZE - 1);

	PMRead(page, acpicaPageBuffer, 1);
	uint8_t *target = acpicaPageBuffer + offset;

	switch (width) {
		case 64: *(uint64_t *) target = value; break;
		case 32: *(uint32_t *) target = (uint32_t) value; break;
		case 16: *(uint16_t *) target = (uint16_t) value; break;
		default: *target = (uint8_t) value; break; // Any other width is treated as a byte write.
	}

	// Copy the modified page back to physical memory.
	PMCopy(page, acpicaPageBuffer, 1);

	return AE_OK;
}
|
||||
|
||||
// Read from an x86 I/O port at the requested width (8, 16 or 32 bits).
ES_EXTERN_C ACPI_STATUS AcpiOsReadPort(ACPI_IO_ADDRESS address, UINT32 *value, UINT32 width) {
	switch (width) {
		case 8: *value = ProcessorIn8(address); break;
		case 16: *value = ProcessorIn16(address); break;
		case 32: *value = ProcessorIn32(address); break;
		default: return AE_ERROR; // Unsupported access width.
	}

	return AE_OK;
}
|
||||
|
||||
// Write 'value' to an x86 I/O port at the requested width (8, 16 or 32 bits).
ES_EXTERN_C ACPI_STATUS AcpiOsWritePort(ACPI_IO_ADDRESS address, UINT32 value, UINT32 width) {
	switch (width) {
		case 8: ProcessorOut8(address, (uint8_t) value); break;
		case 16: ProcessorOut16(address, (uint16_t) value); break;
		case 32: ProcessorOut32(address, (uint32_t) value); break;
		default: return AE_ERROR; // Unsupported access width.
	}

	return AE_OK;
}
|
||||
|
||||
// Read from PCI configuration space at the given width.
ES_EXTERN_C ACPI_STATUS AcpiOsReadPciConfiguration(ACPI_PCI_ID *address, UINT32 reg, UINT64 *value, UINT32 width) {
	if (width == 64) {
		// Two consecutive 32-bit reads, low dword first.
		// NOTE(review): unlike the narrow path below, 'reg' is not aligned
		// with & ~3 here — confirm 64-bit accesses always arrive aligned.
		uint64_t x = (uint64_t) KPCIReadConfig(address->Bus, address->Device, address->Function, reg)
			| ((uint64_t) KPCIReadConfig(address->Bus, address->Device, address->Function, reg + 4) << 32);
		*value = x;
	} else {
		// Read the containing aligned dword, then shift/mask out the bytes requested.
		uint32_t x = KPCIReadConfig(address->Bus, address->Device, address->Function, reg & ~3);
		x >>= (reg & 3) * 8;

		if (width == 8) x &= 0xFF;
		if (width == 16) x &= 0xFFFF;

		*value = x;
	}

	return AE_OK;
}
|
||||
|
||||
// Write to PCI configuration space at the given width.
ES_EXTERN_C ACPI_STATUS AcpiOsWritePciConfiguration(ACPI_PCI_ID *address, UINT32 reg, UINT64 value, UINT32 width) {
	if (width == 64) {
		// Two consecutive 32-bit writes, low dword first.
		KPCIWriteConfig(address->Bus, address->Device, address->Function, reg, value);
		KPCIWriteConfig(address->Bus, address->Device, address->Function, reg + 4, value >> 32);
	} else if (width == 32) {
		KPCIWriteConfig(address->Bus, address->Device, address->Function, reg, value);
	} else {
		// Sub-dword write: read-modify-write the containing aligned dword.
		uint32_t x = KPCIReadConfig(address->Bus, address->Device, address->Function, reg & ~3);
		uint32_t o = reg & 3; // Byte offset of the target within the dword.

		if (width == 16) {
			if (o == 2) {
				x = (x & ~0xFFFF0000) | (value << 16); // High word.
			} else {
				x = (x & ~0x0000FFFF) | (value << 0);  // Low word.
			}
		} else if (width == 8) {
			if (o == 3) {
				x = (x & ~0xFF000000) | (value << 24);
			} else if (o == 2) {
				x = (x & ~0x00FF0000) | (value << 16);
			} else if (o == 1) {
				x = (x & ~0x0000FF00) | (value << 8);
			} else {
				x = (x & ~0x000000FF) | (value << 0);
			}
		}

		KPCIWriteConfig(address->Bus, address->Device, address->Function, reg & ~3, x);
	}

	return AE_OK;
}
|
||||
|
||||
// printf-style output from ACPICA, formatted into a shared buffer and routed
// through the kernel's EsPrint.
ES_EXTERN_C void AcpiOsPrintf(const char *format, ...) {
	va_list arguments;
	va_start(arguments, format);
	int x = stbsp_vsnprintf(acpiPrintf, sizeof(acpiPrintf), format, arguments);
	// NOTE(review): EsPrint's %s appears to take (length, string) — confirm,
	// since the formatted length 'x' is passed alongside the buffer here.
	EsPrint("%s", x, acpiPrintf);
	va_end(arguments);
}
|
||||
|
||||
// va_list variant of AcpiOsPrintf; same shared buffer and EsPrint routing.
ES_EXTERN_C void AcpiOsVprintf(const char *format, va_list arguments) {
	int x = stbsp_vsnprintf(acpiPrintf, sizeof(acpiPrintf), format, arguments);
	// NOTE(review): see AcpiOsPrintf — '%s' is assumed to take (length, string).
	EsPrint("%s", x, acpiPrintf);
}
|
||||
|
||||
// Current timer value in 100-nanosecond units, derived from the CPU's
// timestamp counter. If the tick rate is too low to express that resolution,
// the raw tick count is returned instead.
ES_EXTERN_C UINT64 AcpiOsGetTimer() {
	uint64_t tick = ProcessorReadTimeStamp();
	uint64_t ticksPer100Ns = timeStampTicksPerMs / 10000; // 10000 * 100ns = 1ms.
	return ticksPer100Ns ? (tick / ticksPer100Ns) : tick;
}
|
||||
|
||||
// ACPICA signal hook, invoked for AML Fatal/Breakpoint operations.
ES_EXTERN_C ACPI_STATUS AcpiOsSignal(UINT32 function, void *information) {
	(void) function;
	(void) information;
	// NOTE(review): this panics for every signal, including
	// ACPI_SIGNAL_BREAKPOINT; normally only ACPI_SIGNAL_FATAL is treated as
	// fatal — confirm this is intentional.
	KernelPanic("AcpiOsSignal - ACPI requested kernel panic.\n");
	return AE_OK;
}
|
||||
|
||||
// Hook called just before entering a sleep state; no OS-specific work needed.
ES_EXTERN_C ACPI_STATUS AcpiOsEnterSleep(UINT8 sleepState, UINT32 registerAValue, UINT32 registerBValue) {
	(void) sleepState;
	(void) registerAValue;
	(void) registerBValue;
	return AE_OK;
}
|
||||
|
||||
// Fixed-event handler for the ACPI power button. The message is sent from an
// async task because this handler runs in interrupt context.
UINT32 ACPIPowerButtonPressed(void *) {
	KRegisterAsyncTask([] (EsGeneric) {
		_EsMessageWithObject m = { nullptr, ES_MSG_POWER_BUTTON_PRESSED };
		if (scheduler.shutdown) return; // Ignore presses once shutdown has begun.
		if (desktopProcess) desktopProcess->messageQueue.SendMessage(&m);
	}, nullptr, false);

	return 0;
}
|
||||
|
||||
// Return the first IRQ listed in the object's current resource settings
// (_CRS), or -1 if none is found or an ACPICA call fails.
int32_t ACPIFindIRQ(ACPI_HANDLE object) {
	// Standard ACPICA two-call protocol: the first call with an empty buffer
	// is expected to fail with AE_BUFFER_OVERFLOW and report the needed length.
	ACPI_BUFFER buffer = {};
	ACPI_STATUS status = AcpiGetCurrentResources(object, &buffer);
	if (status != AE_BUFFER_OVERFLOW) return -1;
	buffer.Pointer = EsHeapAllocate(buffer.Length, false, K_FIXED);
	EsDefer(EsHeapFree(buffer.Pointer, buffer.Length, K_FIXED));
	if (!buffer.Pointer) return -1;
	status = AcpiGetCurrentResources(object, &buffer);
	if (status != AE_OK) return -1;
	ACPI_RESOURCE *resource = (ACPI_RESOURCE *) buffer.Pointer;

	// Walk the variable-length resource list until the terminating END_TAG.
	while (resource->Type != ACPI_RESOURCE_TYPE_END_TAG) {
		if (resource->Type == ACPI_RESOURCE_TYPE_IRQ) {
			if (resource->Data.Irq.InterruptCount) {
				return resource->Data.Irq.Interrupts[0];
			}
		} else if (resource->Type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
			if (resource->Data.ExtendedIrq.InterruptCount) {
				return resource->Data.ExtendedIrq.Interrupts[0];
			}
		}

		resource = (ACPI_RESOURCE *) ((uint8_t *) resource + resource->Length);
	}

	return -1;
}
|
||||
|
||||
// Read the PCI routing table (_PRT) of the given PCI bus object and program
// the interrupt controller's PCI IRQ lines from its entries.
void ACPIEnumeratePRTEntries(ACPI_HANDLE pciBus) {
	// TODO Other PCI buses.
	// TODO Is this always bus 0?

	// ACPICA two-call protocol: first call reports the required buffer length
	// via AE_BUFFER_OVERFLOW.
	ACPI_BUFFER buffer = {};
	ACPI_STATUS status = AcpiGetIrqRoutingTable(pciBus, &buffer);
	if (status != AE_BUFFER_OVERFLOW) return;
	buffer.Pointer = EsHeapAllocate(buffer.Length, false, K_FIXED);
	EsDefer(EsHeapFree(buffer.Pointer, buffer.Length, K_FIXED));
	if (!buffer.Pointer) return;
	status = AcpiGetIrqRoutingTable(pciBus, &buffer);
	if (status != AE_OK) return;
	ACPI_PCI_ROUTING_TABLE *table = (ACPI_PCI_ROUTING_TABLE *) buffer.Pointer;

	// Entries are variable-length; a zero Length terminates the table.
	while (table->Length) {
		ACPI_HANDLE source;

		if (AE_OK == AcpiGetHandle(pciBus, table->Source, &source)) {
			int32_t irq = ACPIFindIRQ(source);

			if (irq != -1) {
				KernelLog(LOG_INFO, "ACPI", "PRT entry", "Pin: %d; PCI slot: %X; IRQ: %d\n",
						table->Pin, (table->Address >> 16) & 0xFF, irq);

				// Sanity-check the entry before programming the IRQ line.
				if (irq != 9 && irq != 10 && irq != 11) {
					KernelLog(LOG_ERROR, "ACPI", "unexpected IRQ", "IRQ %d was unexpected; expected values are 9, 10 or 11.\n", irq);
				} else if ((table->Address >> 16) > 0xFF) {
					KernelLog(LOG_ERROR, "ACPI", "unexpected address", "Address %x was larger than expected.\n", table->Address);
				} else if (table->Pin > 3) {
					KernelLog(LOG_ERROR, "ACPI", "unexpected pin", "Pin %d was larger than expected.\n", table->Pin);
				} else {
					ArchSetPCIIRQLine(table->Address >> 16, table->Pin, irq);
				}
			}
		}

		table = (ACPI_PCI_ROUTING_TABLE *) ((uint8_t *) table + table->Length);
	}
}
|
||||
|
||||
// A kernel device wrapping an ACPI namespace object (e.g. a thermal zone).
struct KACPIObject : KDevice {
	ACPI_HANDLE handle;                           // Underlying ACPICA object handle.
	KACPINotificationHandler notificationHandler; // Driver callback for ACPI device notifications.
	EsGeneric notificationHandlerContext;         // Opaque context passed to the callback.
};
|
||||
|
||||
// ACPICA notify callback: forwards device notifications to the handler
// registered via KACPIObjectSetDeviceNotificationHandler (which always sets
// notificationHandler before installing this callback).
void ACPINotificationHandler(ACPI_HANDLE, uint32_t value, void *context) {
	KernelLog(LOG_INFO, "ACPI", "notification", "Received a notification with value %X.\n", value);
	KACPIObject *object = (KACPIObject *) context;
	object->notificationHandler(object, value, object->notificationHandlerContext);
}
|
||||
|
||||
// Register a driver callback for ACPI device notifications on this object.
// The handler fields are filled in before installing the ACPICA hook so the
// shim never sees a partially-initialised object.
EsError KACPIObjectSetDeviceNotificationHandler(KACPIObject *object, KACPINotificationHandler handler, EsGeneric context) {
	object->notificationHandler = handler;
	object->notificationHandlerContext = context;

	ACPI_STATUS status = AcpiInstallNotifyHandler(object->handle, ACPI_DEVICE_NOTIFY, ACPINotificationHandler, object);

	switch (status) {
		case AE_OK: return ES_SUCCESS;
		case AE_NO_MEMORY: return ES_ERROR_INSUFFICIENT_RESOURCES;
		default: return ES_ERROR_UNKNOWN;
	}
}
|
||||
|
||||
// Evaluate the named ACPI object (relative to 'object') and return its
// integer result via _integer (which may be null to just test evaluation).
// Maps ACPICA status codes onto EsError values.
EsError KACPIObjectEvaluateInteger(KACPIObject *object, const char *pathName, uint64_t *_integer) {
	ACPI_BUFFER buffer = {};
	buffer.Length = ACPI_ALLOCATE_BUFFER; // Let ACPICA allocate the result buffer.

	ACPI_STATUS status = AcpiEvaluateObject(object->handle, (char *) pathName, nullptr, &buffer);
	EsError error = ES_SUCCESS;

	if (status == AE_OK) {
		ACPI_OBJECT *result = (ACPI_OBJECT *) buffer.Pointer;

		if (result->Type == ACPI_TYPE_INTEGER) {
			if (_integer) {
				*_integer = result->Integer.Value;
			}
		} else {
			// Evaluated successfully, but to a non-integer object.
			error = ES_ERROR_UNKNOWN;
		}

		ACPI_FREE(buffer.Pointer); // Buffer was allocated by ACPICA above.
	} else if (status == AE_NO_MEMORY) {
		error = ES_ERROR_INSUFFICIENT_RESOURCES;
	} else if (status == AE_NOT_FOUND) {
		error = ES_ERROR_FILE_DOES_NOT_EXIST;
	} else {
		error = ES_ERROR_UNKNOWN;
	}

	return error;
}
|
||||
|
||||
// Invoke the named ACPI method with a single integer argument, discarding any
// result. Maps ACPICA status codes onto EsError values.
EsError KACPIObjectEvaluateMethodWithInteger(KACPIObject *object, const char *pathName, uint64_t integer) {
	ACPI_OBJECT argument = {};
	argument.Type = ACPI_TYPE_INTEGER;
	argument.Integer.Value = integer;

	ACPI_OBJECT_LIST argumentList = {};
	argumentList.Count = 1;
	argumentList.Pointer = &argument;

	ACPI_STATUS status = AcpiEvaluateObject(object->handle, (char *) pathName, &argumentList, nullptr);

	switch (status) {
		case AE_OK: return ES_SUCCESS;
		case AE_NO_MEMORY: return ES_ERROR_INSUFFICIENT_RESOURCES;
		case AE_NOT_FOUND: return ES_ERROR_FILE_DOES_NOT_EXIST;
		default: return ES_ERROR_UNKNOWN;
	}
}
|
||||
|
||||
// Namespace-walk callback: logs every device object found, and creates a
// kernel device for each thermal zone so a thermal driver can attach.
ACPI_STATUS ACPIWalkNamespaceCallback(ACPI_HANDLE object, uint32_t depth, void *, void **) {
	ACPI_DEVICE_INFO *information;

	// Fixed: the status of AcpiGetObjectInfo was ignored; on failure the
	// info pointer is not valid and must not be dereferenced or freed.
	if (ACPI_FAILURE(AcpiGetObjectInfo(object, &information)) || !information) {
		return AE_OK; // Skip this object, but continue the walk.
	}

	// The 4-byte ACPI name is not NUL-terminated; make a C string copy.
	char name[5];
	EsMemoryCopy(name, &information->Name, 4);
	name[4] = 0;

	if (information->Type == ACPI_TYPE_DEVICE) {
		KernelLog(LOG_INFO, "ACPI", "device object", "Found device object '%z' at depth %d with HID '%z', UID '%z' and address %x.\n",
				name, depth,
				(information->Valid & ACPI_VALID_HID) ? information->HardwareId.String : "??",
				(information->Valid & ACPI_VALID_UID) ? information->UniqueId.String : "??",
				(information->Valid & ACPI_VALID_ADR) ? information->Address : 0);
	}

	if (information->Type == ACPI_TYPE_THERMAL) {
		KACPIObject *device = (KACPIObject *) KDeviceCreate("ACPI object", acpi.computer, sizeof(KACPIObject));

		if (device) {
			device->handle = object;
			KDeviceAttachByName(device, "ACPIThermal");
		}
	}

	ACPI_FREE(information);
	return AE_OK;
}
|
||||
|
||||
// Restart the machine, or power it off via ACPI sleep state S5.
void ArchShutdown(uintptr_t action) {
	if (action == SHUTDOWN_ACTION_RESTART) ProcessorReset();
	AcpiEnterSleepStatePrep(5); // Prepare for S5 (soft off).
	ProcessorDisableInterrupts(); // Required before entering the sleep state.
	AcpiEnterSleepState(5);     // Does not return on success.
}
|
||||
|
||||
// Bring up ACPICA: subsystem, tables, namespace and events; then register the
// power button, log discovered devices, and program PCI IRQ routing from _PRT.
// NOTE(review): the AcpiInitialize*/AcpiLoadTables/AcpiEnable* statuses are
// not checked — a failure here silently leaves ACPICA partially initialised.
void ACPICAInitialise() {
	AcpiInitializeSubsystem();
	AcpiInitializeTables(nullptr, 256, true);
	AcpiLoadTables();
	AcpiEnableSubsystem(ACPI_FULL_INITIALIZATION);
	AcpiInitializeObjects(ACPI_FULL_INITIALIZATION);

	// Expose the fixed power button as a device if its event can be enabled.
	if (AE_OK == AcpiEnableEvent(ACPI_EVENT_POWER_BUTTON, 0)
			&& AE_OK == AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON, ACPIPowerButtonPressed, nullptr)) {
		KDeviceCreate("ACPI power button", acpi.computer, sizeof(KDevice));
	}

	// Walk the namespace (max depth 10) to log devices and attach thermal zones.
	void *result;
	AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 10, ACPIWalkNamespaceCallback, nullptr, nullptr, &result);

	ACPI_HANDLE pciBus;
	char pciBusPath[] = "\\_SB_.PCI0";

	if (AE_OK == AcpiGetHandle(nullptr, pciBusPath, &pciBus)) {
		ACPIEnumeratePRTEntries(pciBus);
	}
}
|
|
@ -1906,14 +1906,11 @@ void FSRegisterFileSystem(KFileSystem *fileSystem) {
|
|||
}
|
||||
|
||||
void FSRegisterBlockDevice(KBlockDevice *device) {
|
||||
#ifdef SERIAL_STARTUP
|
||||
FSDetectFileSystem(device);
|
||||
#else
|
||||
KThreadCreate("FSDetect", [] (uintptr_t context) {
|
||||
FSDetectFileSystem((KBlockDevice *) context);
|
||||
KBlockDevice *device = (KBlockDevice *) context;
|
||||
FSDetectFileSystem(device);
|
||||
KDeviceSendConnectedMessage(device, ES_DEVICE_BLOCK);
|
||||
}, (uintptr_t) device);
|
||||
#endif
|
||||
KDeviceSendConnectedMessage(device, ES_DEVICE_BLOCK);
|
||||
}
|
||||
|
||||
void FSShutdown() {
|
||||
|
|
|
@ -90,7 +90,7 @@ struct CPULocalStorage {
|
|||
unsigned processorID;
|
||||
size_t spinlockCount;
|
||||
|
||||
ArchCPU *archCPU;
|
||||
struct ArchCPU *archCPU;
|
||||
|
||||
// TODO Have separate interrupt task threads and system worker threads (with no task limit).
|
||||
#define MAX_ASYNC_TASKS (256)
|
||||
|
@ -121,54 +121,45 @@ extern "C" {
|
|||
InterruptContext *ArchInitialiseThread(uintptr_t kernelStack, uintptr_t kernelStackSize, struct Thread *thread,
|
||||
uintptr_t startAddress, uintptr_t argument1, uintptr_t argument2,
|
||||
bool userland, uintptr_t stack, uintptr_t userStackSize);
|
||||
EsError ArchApplyRelocation(uintptr_t type, uint8_t *buffer, uintptr_t offset, uintptr_t result);
|
||||
void ArchSwitchContext(struct InterruptContext *context, struct MMArchVAS *virtualAddressSpace, uintptr_t threadKernelStack,
|
||||
struct Thread *newThread, struct MMSpace *oldAddressSpace);
|
||||
EsError ArchApplyRelocation(uintptr_t type, uint8_t *buffer, uintptr_t offset, uintptr_t result);
|
||||
|
||||
void MMArchRemap(MMSpace *space, const void *virtualAddress, uintptr_t newPhysicalAddress); // Must be done with interrupts disabled; does not invalidate on other processors.
|
||||
bool MMArchMapPage(MMSpace *space, uintptr_t physicalAddress, uintptr_t virtualAddress, unsigned flags); // Returns false if the page was already mapped.
|
||||
void MMArchUnmapPages(MMSpace *space, uintptr_t virtualAddressStart, uintptr_t pageCount, unsigned flags, size_t unmapMaximum = 0, uintptr_t *resumePosition = nullptr);
|
||||
void MMArchInvalidatePages(uintptr_t virtualAddressStart, uintptr_t pageCount);
|
||||
bool MMArchHandlePageFault(uintptr_t address, uint32_t flags);
|
||||
void MMArchInitialiseVAS();
|
||||
bool MMArchInitialiseUserSpace(MMSpace *space);
|
||||
bool MMArchCommitPageTables(MMSpace *space, struct MMRegion *region);
|
||||
void MMArchRemap(MMSpace *space, const void *virtualAddress, uintptr_t newPhysicalAddress); // Must be done with interrupts disabled; does not invalidate on other processors.
|
||||
bool MMArchMakePageWritable(MMSpace *space, uintptr_t virtualAddress);
|
||||
bool MMArchHandlePageFault(uintptr_t address, uint32_t flags);
|
||||
void MMArchInvalidatePages(uintptr_t virtualAddressStart, uintptr_t pageCount);
|
||||
bool MMArchIsBufferInUserRange(uintptr_t baseAddress, size_t byteCount);
|
||||
bool MMArchSafeCopy(uintptr_t destinationAddress, uintptr_t sourceAddress, size_t byteCount); // Returns false if a page fault occured during the copy.
|
||||
bool MMArchCommitPageTables(MMSpace *space, struct MMRegion *region);
|
||||
bool MMArchInitialiseUserSpace(MMSpace *space, struct MMRegion *firstRegion);
|
||||
void MMArchInitialise();
|
||||
void MMArchFreeVAS(MMSpace *space);
|
||||
void MMArchFinalizeVAS(MMSpace *space);
|
||||
uintptr_t MMArchEarlyAllocatePage();
|
||||
uint64_t MMArchPopulatePageFrameDatabase();
|
||||
uintptr_t MMArchGetPhysicalMemoryHighest();
|
||||
|
||||
void ProcessorDisableInterrupts();
|
||||
void ProcessorEnableInterrupts();
|
||||
bool ProcessorAreInterruptsEnabled();
|
||||
|
||||
void ArchResetCPU();
|
||||
void ProcessorHalt();
|
||||
void ProcessorSendYieldIPI(Thread *thread);
|
||||
void ProcessorFakeTimerInterrupt();
|
||||
|
||||
void ProcessorInvalidatePage(uintptr_t virtualAddress);
|
||||
void ProcessorInvalidateAllPages();
|
||||
void ProcessorFlushCodeCache();
|
||||
void ProcessorFlushCache();
|
||||
|
||||
void ProcessorSetLocalStorage(struct CPULocalStorage *cls);
|
||||
void ProcessorSetThreadStorage(uintptr_t tls);
|
||||
void ProcessorSetAddressSpace(struct MMArchVAS *virtualAddressSpace); // Need to call MMSpaceOpenReference/MMSpaceCloseReference if using this.
|
||||
|
||||
uint64_t ProcessorReadTimeStamp();
|
||||
|
||||
struct CPULocalStorage *GetLocalStorage();
|
||||
struct Thread *GetCurrentThread();
|
||||
|
||||
extern PhysicalMemoryRegion *physicalMemoryRegions;
|
||||
extern size_t physicalMemoryRegionsCount;
|
||||
extern size_t physicalMemoryRegionsPagesCount;
|
||||
extern size_t physicalMemoryOriginalPagesCount;
|
||||
extern size_t physicalMemoryRegionsIndex;
|
||||
extern uintptr_t physicalMemoryHighest;
|
||||
|
||||
// From module.h:
|
||||
// uintptr_t MMArchTranslateAddress(MMSpace *space, uintptr_t virtualAddress, bool writeAccess);
|
||||
// uint32_t KPCIReadConfig(uint8_t bus, uint8_t device, uint8_t function, uint8_t offset, int size);
|
||||
|
@ -176,6 +167,16 @@ extern "C" {
|
|||
// bool KRegisterIRQ(intptr_t interruptIndex, KIRQHandler handler, void *context, const char *cOwnerName, struct KPCIDevice *pciDevice);
|
||||
// KMSIInformation KRegisterMSI(KIRQHandler handler, void *context, const char *cOwnerName);
|
||||
// void KUnregisterMSI(uintptr_t tag);
|
||||
// size_t KGetCPUCount();
|
||||
// struct CPULocalStorage *KGetCPULocal(uintptr_t index);
|
||||
|
||||
// The architecture layer must also define:
|
||||
// - MM_CORE_REGIONS_START and MM_CORE_REGIONS_COUNT.
|
||||
// - MM_KERNEL_SPACE_START and MM_KERNEL_SPACE_SIZE.
|
||||
// - MM_MODULES_START and MM_MODULES_SIZE.
|
||||
// - ArchCheckBundleHeader, ArchCheckELFHeader and ArchIsAddressInKernelSpace.
|
||||
// - K_ARCH_STACK_GROWS_DOWN or K_ARCH_STACK_GROWS_UP.
|
||||
// - K_ARCH_NAME.
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -253,7 +254,7 @@ void EsAssertionFailure(const char *file, int line) {
|
|||
|
||||
#if defined(ARCH_X86_64) && defined(IMPLEMENTATION)
|
||||
#include "x86_64.h"
|
||||
#include "terminal.cpp"
|
||||
#include <drivers/acpi.cpp>
|
||||
#include "x86_64.cpp"
|
||||
#include "terminal.cpp"
|
||||
#endif
|
||||
|
|
|
@ -153,6 +153,8 @@ struct Pool {
|
|||
|
||||
struct PMM {
|
||||
MMPageFrame *pageFrames;
|
||||
bool pageFrameDatabaseInitialised;
|
||||
uintptr_t pageFrameDatabaseCount;
|
||||
|
||||
uintptr_t firstFreePage;
|
||||
uintptr_t firstZeroedPage;
|
||||
|
@ -426,39 +428,23 @@ uintptr_t MMPhysicalAllocate(unsigned flags, uintptr_t count, uintptr_t align, u
|
|||
|
||||
bool simple = count == 1 && align == 1 && below == 0;
|
||||
|
||||
if (physicalMemoryRegionsPagesCount) {
|
||||
if (!pmm.pageFrameDatabaseInitialised) {
|
||||
// Early page allocation before the page frame database is initialised.
|
||||
|
||||
if (!simple) {
|
||||
KernelPanic("MMPhysicalAllocate - Non-simple allocation before initialisation of the page frame database.\n");
|
||||
}
|
||||
|
||||
uintptr_t i = physicalMemoryRegionsIndex;
|
||||
|
||||
while (!physicalMemoryRegions[i].pageCount) {
|
||||
i++;
|
||||
|
||||
if (i == physicalMemoryRegionsCount) {
|
||||
KernelPanic("MMPhysicalAllocate - Expected more pages in physical regions.\n");
|
||||
}
|
||||
}
|
||||
|
||||
PhysicalMemoryRegion *region = physicalMemoryRegions + i;
|
||||
uintptr_t returnValue = region->baseAddress;
|
||||
|
||||
region->baseAddress += K_PAGE_SIZE;
|
||||
region->pageCount--;
|
||||
physicalMemoryRegionsPagesCount--;
|
||||
physicalMemoryRegionsIndex = i;
|
||||
uintptr_t page = MMArchEarlyAllocatePage();
|
||||
|
||||
if (flags & MM_PHYSICAL_ALLOCATE_ZEROED) {
|
||||
// TODO Hack!
|
||||
MMArchMapPage(coreMMSpace, returnValue, (uintptr_t) earlyZeroBuffer,
|
||||
MMArchMapPage(coreMMSpace, page, (uintptr_t) earlyZeroBuffer,
|
||||
MM_MAP_PAGE_OVERWRITE | MM_MAP_PAGE_NO_NEW_TABLES | MM_MAP_PAGE_FRAME_LOCK_ACQUIRED);
|
||||
EsMemoryZero(earlyZeroBuffer, K_PAGE_SIZE);
|
||||
}
|
||||
|
||||
return returnValue;
|
||||
return page;
|
||||
} else if (!simple) {
|
||||
// Slow path.
|
||||
// TODO Use standby pages.
|
||||
|
@ -524,7 +510,7 @@ void MMPhysicalFree(uintptr_t page, bool mutexAlreadyAcquired, size_t count) {
|
|||
if (!page) KernelPanic("MMPhysicalFree - Invalid page.\n");
|
||||
if (mutexAlreadyAcquired) KMutexAssertLocked(&pmm.pageFrameMutex);
|
||||
else KMutexAcquire(&pmm.pageFrameMutex);
|
||||
if (physicalMemoryRegionsPagesCount) KernelPanic("MMPhysicalFree - PMM not yet initialised.\n");
|
||||
if (!pmm.pageFrameDatabaseInitialised) KernelPanic("MMPhysicalFree - PMM not yet initialised.\n");
|
||||
|
||||
page >>= K_PAGE_BITS;
|
||||
|
||||
|
@ -551,7 +537,7 @@ void MMPhysicalFree(uintptr_t page, bool mutexAlreadyAcquired, size_t count) {
|
|||
|
||||
void MMCheckUnusable(uintptr_t physicalStart, size_t bytes) {
|
||||
for (uintptr_t i = physicalStart / K_PAGE_SIZE; i < (physicalStart + bytes + K_PAGE_SIZE - 1) / K_PAGE_SIZE
|
||||
&& i < physicalMemoryHighest / K_PAGE_SIZE; i++) {
|
||||
&& i < pmm.pageFrameDatabaseCount; i++) {
|
||||
if (pmm.pageFrames[i].state != MMPageFrame::UNUSABLE) {
|
||||
KernelPanic("MMCheckUnusable - Page frame at address %x should be unusable.\n", i * K_PAGE_SIZE);
|
||||
}
|
||||
|
@ -1587,13 +1573,11 @@ bool MMSpaceInitialise(MMSpace *space) {
|
|||
return false;
|
||||
}
|
||||
|
||||
if (!MMArchInitialiseUserSpace(space)) {
|
||||
if (!MMArchInitialiseUserSpace(space, region)) {
|
||||
EsHeapFree(region, sizeof(MMRegion), K_CORE);
|
||||
return false;
|
||||
}
|
||||
|
||||
region->baseAddress = MM_USER_SPACE_START;
|
||||
region->pageCount = MM_USER_SPACE_SIZE / K_PAGE_SIZE;
|
||||
TreeInsert(&space->freeRegionsBase, ®ion->itemBase, region, MakeShortKey(region->baseAddress));
|
||||
TreeInsert(&space->freeRegionsSize, ®ion->itemSize, region, MakeShortKey(region->pageCount), AVL_DUPLICATE_KEYS_ALLOW);
|
||||
|
||||
|
@ -2276,21 +2260,11 @@ void MMSpaceCloseReference(MMSpace *space) {
|
|||
|
||||
void MMInitialise() {
|
||||
{
|
||||
// Initialise coreMMSpace.
|
||||
// Initialise coreMMSpace and kernelMMSpace.
|
||||
|
||||
MMArchInitialiseVAS();
|
||||
mmCoreRegions[0].baseAddress = MM_CORE_SPACE_START;
|
||||
mmCoreRegions[0].pageCount = MM_CORE_SPACE_SIZE / K_PAGE_SIZE;
|
||||
mmCoreRegions[0].core.used = false;
|
||||
mmCoreRegionCount = 1;
|
||||
}
|
||||
|
||||
{
|
||||
// Initialise kernelMMSpace.
|
||||
|
||||
KMutexAcquire(&coreMMSpace->reserveMutex);
|
||||
kernelMMSpace->data.l1Commit = (uint8_t *) MMReserve(coreMMSpace, L1_COMMIT_SIZE_BYTES, MM_REGION_NORMAL | MM_REGION_NO_COMMIT_TRACKING | MM_REGION_FIXED)->baseAddress;
|
||||
KMutexRelease(&coreMMSpace->reserveMutex);
|
||||
MMArchInitialise();
|
||||
|
||||
MMRegion *region = (MMRegion *) EsHeapAllocate(sizeof(MMRegion), true, K_CORE);
|
||||
region->baseAddress = MM_KERNEL_SPACE_START;
|
||||
|
@ -2306,25 +2280,16 @@ void MMInitialise() {
|
|||
pmm.pmManipulationRegion = (void *) MMReserve(kernelMMSpace, PHYSICAL_MEMORY_MANIPULATION_REGION_PAGES * K_PAGE_SIZE, ES_FLAGS_DEFAULT)->baseAddress;
|
||||
KMutexRelease(&kernelMMSpace->reserveMutex);
|
||||
|
||||
physicalMemoryHighest += K_PAGE_SIZE << 3; // 1 extra for the top page, then round up so the page bitset is byte-aligned.
|
||||
pmm.pageFrames = (MMPageFrame *) MMStandardAllocate(kernelMMSpace, (physicalMemoryHighest >> K_PAGE_BITS) * sizeof(MMPageFrame), MM_REGION_FIXED);
|
||||
pmm.freeOrZeroedPageBitset.Initialise(physicalMemoryHighest >> K_PAGE_BITS, true);
|
||||
// 1 extra for the top page, then round up so the page bitset is byte-aligned.
|
||||
pmm.pageFrameDatabaseCount = (MMArchGetPhysicalMemoryHighest() + (K_PAGE_SIZE << 3)) >> K_PAGE_BITS;
|
||||
|
||||
pmm.pageFrames = (MMPageFrame *) MMStandardAllocate(kernelMMSpace, pmm.pageFrameDatabaseCount * sizeof(MMPageFrame), MM_REGION_FIXED);
|
||||
pmm.freeOrZeroedPageBitset.Initialise(pmm.pageFrameDatabaseCount, true);
|
||||
|
||||
uint64_t commitLimit = 0;
|
||||
MMPhysicalInsertFreePagesStart();
|
||||
|
||||
for (uintptr_t i = 0; i < physicalMemoryRegionsCount; i++) {
|
||||
uintptr_t base = physicalMemoryRegions[i].baseAddress >> K_PAGE_BITS;
|
||||
uintptr_t count = physicalMemoryRegions[i].pageCount;
|
||||
commitLimit += count;
|
||||
|
||||
for (uintptr_t j = 0; j < count; j++) {
|
||||
MMPhysicalInsertFreePagesNext(base + j);
|
||||
}
|
||||
}
|
||||
|
||||
uint64_t commitLimit = MMArchPopulatePageFrameDatabase();
|
||||
MMPhysicalInsertFreePagesEnd();
|
||||
physicalMemoryRegionsPagesCount = 0;
|
||||
pmm.pageFrameDatabaseInitialised = true;
|
||||
|
||||
pmm.commitLimit = pmm.commitFixedLimit = commitLimit;
|
||||
KernelLog(LOG_INFO, "Memory", "pmm initialised", "MMInitialise - PMM initialised with a fixed commit limit of %d pages.\n", pmm.commitLimit);
|
||||
|
@ -2337,7 +2302,7 @@ void MMInitialise() {
|
|||
}
|
||||
|
||||
{
|
||||
// Thread initialisation.
|
||||
// Create threads.
|
||||
|
||||
pmm.zeroPageEvent.autoReset = true;
|
||||
MMCommit(PHYSICAL_MEMORY_MANIPULATION_REGION_PAGES * K_PAGE_SIZE, true);
|
||||
|
|
|
@ -1859,7 +1859,6 @@ SYSCALL_IMPLEMENT(ES_SYSCALL_DEBUG_COMMAND) {
|
|||
|
||||
#ifdef DEBUG_BUILD
|
||||
if (argument0 == 1) {
|
||||
ArchResetCPU();
|
||||
} else if (argument0 == 2) {
|
||||
KernelPanic("Debug command 2.\n");
|
||||
} else if (argument0 == 4) {
|
||||
|
|
|
@ -356,7 +356,7 @@ void KernelPanic(const char *format, ...) {
|
|||
|
||||
KWaitKey();
|
||||
} else if (key == ES_SCANCODE_1) {
|
||||
ArchResetCPU();
|
||||
ProcessorReset();
|
||||
} else if (key == ES_SCANCODE_2) {
|
||||
EsPrint("Enter address: ");
|
||||
uintptr_t address = DebugReadNumber();
|
||||
|
|
|
@ -1,17 +1,5 @@
|
|||
#ifndef IMPLEMENTATION
|
||||
|
||||
typedef struct ACPIProcessor ArchCPU;
|
||||
|
||||
struct InterruptContext {
|
||||
uint64_t cr2, ds;
|
||||
uint8_t fxsave[512 + 16];
|
||||
uint64_t _check, cr8;
|
||||
uint64_t r15, r14, r13, r12, r11, r10, r9, r8;
|
||||
uint64_t rbp, rdi, rsi, rdx, rcx, rbx, rax;
|
||||
uint64_t interruptNumber, errorCode;
|
||||
uint64_t rip, cs, flags, rsp, ss;
|
||||
};
|
||||
|
||||
struct MMArchVAS {
|
||||
// NOTE Must be first in the structure. See ProcessorSetAddressSpace and ArchSwitchContext.
|
||||
uintptr_t cr3;
|
||||
|
@ -40,16 +28,12 @@ struct MMArchVAS {
|
|||
KMutex mutex; // Acquire to modify the page tables.
|
||||
};
|
||||
|
||||
#define MM_CORE_SPACE_START (0xFFFF800100000000)
|
||||
#define MM_CORE_SPACE_SIZE (0xFFFF8001F0000000 - 0xFFFF800100000000)
|
||||
#define MM_CORE_REGIONS_START (0xFFFF8001F0000000)
|
||||
#define MM_CORE_REGIONS_COUNT ((0xFFFF800200000000 - 0xFFFF8001F0000000) / sizeof(MMRegion))
|
||||
#define MM_KERNEL_SPACE_START (0xFFFF900000000000)
|
||||
#define MM_KERNEL_SPACE_SIZE (0xFFFFF00000000000 - 0xFFFF900000000000)
|
||||
#define MM_MODULES_START (0xFFFFFFFF90000000)
|
||||
#define MM_MODULES_SIZE (0xFFFFFFFFC0000000 - 0xFFFFFFFF90000000)
|
||||
#define MM_USER_SPACE_START (0x100000000000)
|
||||
#define MM_USER_SPACE_SIZE (0xF00000000000 - 0x100000000000)
|
||||
|
||||
#define ArchCheckBundleHeader() (header.mapAddress > 0x800000000000UL || header.mapAddress < 0x1000 || fileSize > 0x1000000000000UL)
|
||||
#define ArchCheckELFHeader() (header->virtualAddress > 0x800000000000UL || header->virtualAddress < 0x1000 || header->segmentSize > 0x1000000000000UL)
|
||||
|
@ -62,6 +46,11 @@ struct MMArchVAS {
|
|||
|
||||
#ifdef IMPLEMENTATION
|
||||
|
||||
#define MM_CORE_SPACE_START (0xFFFF800100000000)
|
||||
#define MM_CORE_SPACE_SIZE (0xFFFF8001F0000000 - 0xFFFF800100000000)
|
||||
#define MM_USER_SPACE_START (0x100000000000)
|
||||
#define MM_USER_SPACE_SIZE (0xF00000000000 - 0x100000000000)
|
||||
|
||||
struct MSIHandler {
|
||||
KIRQHandler callback;
|
||||
void *context;
|
||||
|
@ -75,6 +64,13 @@ struct IRQHandler {
|
|||
const char *cOwnerName;
|
||||
};
|
||||
|
||||
extern PhysicalMemoryRegion *physicalMemoryRegions;
|
||||
extern size_t physicalMemoryRegionsCount;
|
||||
extern size_t physicalMemoryRegionsPagesCount;
|
||||
extern size_t physicalMemoryOriginalPagesCount;
|
||||
extern size_t physicalMemoryRegionsIndex;
|
||||
extern uintptr_t physicalMemoryHighest;
|
||||
|
||||
uint8_t pciIRQLines[0x100 /* slots */][4 /* pins */];
|
||||
|
||||
MSIHandler msiHandlers[INTERRUPT_VECTOR_MSI_COUNT];
|
||||
|
@ -194,7 +190,7 @@ bool MMArchMapPage(MMSpace *space, uintptr_t physicalAddress, uintptr_t virtualA
|
|||
KernelPanic("MMArchMapPage - Physical address not page aligned.\n");
|
||||
}
|
||||
|
||||
if (pmm.pageFrames && physicalAddress < physicalMemoryHighest) {
|
||||
if (pmm.pageFrames && (physicalAddress >> K_PAGE_BITS) < pmm.pageFrameDatabaseCount) {
|
||||
if (pmm.pageFrames[physicalAddress >> K_PAGE_BITS].state != MMPageFrame::ACTIVE
|
||||
&& pmm.pageFrames[physicalAddress >> K_PAGE_BITS].state != MMPageFrame::UNUSABLE) {
|
||||
KernelPanic("MMArchMapPage - Physical page frame %x not marked as ACTIVE or UNUSABLE.\n", physicalAddress);
|
||||
|
@ -361,10 +357,13 @@ bool MMArchHandlePageFault(uintptr_t address, uint32_t flags) {
|
|||
return false;
|
||||
}
|
||||
|
||||
void MMArchInitialiseVAS() {
|
||||
void MMArchInitialise() {
|
||||
coreMMSpace->data.cr3 = kernelMMSpace->data.cr3 = ProcessorReadCR3();
|
||||
coreMMSpace->data.l1Commit = coreL1Commit;
|
||||
|
||||
mmCoreRegions[0].baseAddress = MM_CORE_SPACE_START;
|
||||
mmCoreRegions[0].pageCount = MM_CORE_SPACE_SIZE / K_PAGE_SIZE;
|
||||
|
||||
for (uintptr_t i = 0x100; i < 0x200; i++) {
|
||||
if (PAGE_TABLE_L4[i] == 0) {
|
||||
// We don't need to commit anything because the PMM isn't ready yet.
|
||||
|
@ -372,6 +371,10 @@ void MMArchInitialiseVAS() {
|
|||
EsMemoryZero((void *) (PAGE_TABLE_L3 + i * 0x200), K_PAGE_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
KMutexAcquire(&coreMMSpace->reserveMutex);
|
||||
kernelMMSpace->data.l1Commit = (uint8_t *) MMReserve(coreMMSpace, L1_COMMIT_SIZE_BYTES, MM_REGION_NORMAL | MM_REGION_NO_COMMIT_TRACKING | MM_REGION_FIXED)->baseAddress;
|
||||
KMutexRelease(&coreMMSpace->reserveMutex);
|
||||
}
|
||||
|
||||
uintptr_t MMArchTranslateAddress(MMSpace *, uintptr_t virtualAddress, bool writeAccess) {
|
||||
|
@ -549,7 +552,10 @@ InterruptContext *ArchInitialiseThread(uintptr_t kernelStack, uintptr_t kernelSt
|
|||
return context;
|
||||
}
|
||||
|
||||
bool MMArchInitialiseUserSpace(MMSpace *space) {
|
||||
bool MMArchInitialiseUserSpace(MMSpace *space, MMRegion *region) {
|
||||
region->baseAddress = MM_USER_SPACE_START;
|
||||
region->pageCount = MM_USER_SPACE_SIZE / K_PAGE_SIZE;
|
||||
|
||||
if (!MMCommit(K_PAGE_SIZE, true)) {
|
||||
return false;
|
||||
}
|
||||
|
@ -700,7 +706,7 @@ bool SetupInterruptRedirectionEntry(uintptr_t _line) {
|
|||
|
||||
for (uintptr_t i = 0; i < acpi.ioapicCount; i++) {
|
||||
ioApic = acpi.ioApics + i;
|
||||
if (line >= ioApic->gsiBase && line < (ioApic->gsiBase + (0xFF & (ioApic->ReadRegister(1) >> 16)))) {
|
||||
if (line >= ioApic->gsiBase && line < (ioApic->gsiBase + (0xFF & (ACPIIoApicReadRegister(ioApic, 1) >> 16)))) {
|
||||
foundIoApic = true;
|
||||
line -= ioApic->gsiBase;
|
||||
break;
|
||||
|
@ -723,9 +729,9 @@ bool SetupInterruptRedirectionEntry(uintptr_t _line) {
|
|||
|
||||
// Send the interrupt to the processor that registered the interrupt.
|
||||
|
||||
ioApic->WriteRegister(redirectionTableIndex, 1 << 16); // Mask the interrupt while we modify the entry.
|
||||
ioApic->WriteRegister(redirectionTableIndex + 1, GetLocalStorage()->archCPU->apicID << 24);
|
||||
ioApic->WriteRegister(redirectionTableIndex, redirectionEntry);
|
||||
ACPIIoApicWriteRegister(ioApic, redirectionTableIndex, 1 << 16); // Mask the interrupt while we modify the entry.
|
||||
ACPIIoApicWriteRegister(ioApic, redirectionTableIndex + 1, GetLocalStorage()->archCPU->apicID << 24);
|
||||
ACPIIoApicWriteRegister(ioApic, redirectionTableIndex, redirectionEntry);
|
||||
|
||||
alreadySetup |= 1 << _line;
|
||||
return true;
|
||||
|
@ -821,7 +827,7 @@ size_t ProcessorSendIPI(uintptr_t interrupt, bool nmi, int processorID) {
|
|||
size_t ignored = 0;
|
||||
|
||||
for (uintptr_t i = 0; i < acpi.processorCount; i++) {
|
||||
ACPIProcessor *processor = acpi.processors + i;
|
||||
ArchCPU *processor = acpi.processors + i;
|
||||
|
||||
if (processorID != -1) {
|
||||
if (processorID != processor->kernelProcessorID) {
|
||||
|
@ -837,11 +843,11 @@ size_t ProcessorSendIPI(uintptr_t interrupt, bool nmi, int processorID) {
|
|||
|
||||
uint32_t destination = acpi.processors[i].apicID << 24;
|
||||
uint32_t command = interrupt | (1 << 14) | (nmi ? 0x400 : 0);
|
||||
acpi.lapic.WriteRegister(0x310 >> 2, destination);
|
||||
acpi.lapic.WriteRegister(0x300 >> 2, command);
|
||||
ACPILapicWriteRegister(0x310 >> 2, destination);
|
||||
ACPILapicWriteRegister(0x300 >> 2, command);
|
||||
|
||||
// Wait for the interrupt to be sent.
|
||||
while (acpi.lapic.ReadRegister(0x300 >> 2) & (1 << 12));
|
||||
while (ACPILapicReadRegister(0x300 >> 2) & (1 << 12));
|
||||
}
|
||||
|
||||
return ignored;
|
||||
|
@ -858,10 +864,10 @@ void ProcessorSendYieldIPI(Thread *thread) {
|
|||
void ArchNextTimer(size_t ms) {
|
||||
while (!scheduler.started); // Wait until the scheduler is ready.
|
||||
GetLocalStorage()->schedulerReady = true; // Make sure this CPU can be scheduled.
|
||||
acpi.lapic.ArchNextTimer(ms); // Set the next timer.
|
||||
ACPILapicNextTimer(ms); // Set the next timer.
|
||||
}
|
||||
|
||||
NewProcessorStorage AllocateNewProcessorStorage(ACPIProcessor *archCPU) {
|
||||
NewProcessorStorage AllocateNewProcessorStorage(ArchCPU *archCPU) {
|
||||
NewProcessorStorage storage = {};
|
||||
storage.local = (CPULocalStorage *) EsHeapAllocate(sizeof(CPULocalStorage), true, K_FIXED);
|
||||
storage.gdt = (uint32_t *) MMMapPhysical(kernelMMSpace, MMPhysicalAllocate(MM_PHYSICAL_ALLOCATE_COMMIT_NOW), K_PAGE_SIZE, ES_FLAGS_DEFAULT);
|
||||
|
@ -882,19 +888,19 @@ void SetupProcessor2(NewProcessorStorage *storage) {
|
|||
uint32_t value = 2 | (1 << 10); // NMI exception interrupt vector.
|
||||
if (acpi.lapicNMIs[i].activeLow) value |= 1 << 13;
|
||||
if (acpi.lapicNMIs[i].levelTriggered) value |= 1 << 15;
|
||||
acpi.lapic.WriteRegister(registerIndex, value);
|
||||
ACPILapicWriteRegister(registerIndex, value);
|
||||
}
|
||||
}
|
||||
|
||||
acpi.lapic.WriteRegister(0x350 >> 2, acpi.lapic.ReadRegister(0x350 >> 2) & ~(1 << 16));
|
||||
acpi.lapic.WriteRegister(0x360 >> 2, acpi.lapic.ReadRegister(0x360 >> 2) & ~(1 << 16));
|
||||
acpi.lapic.WriteRegister(0x080 >> 2, 0);
|
||||
if (acpi.lapic.ReadRegister(0x30 >> 2) & 0x80000000) acpi.lapic.WriteRegister(0x410 >> 2, 0);
|
||||
acpi.lapic.EndOfInterrupt();
|
||||
ACPILapicWriteRegister(0x350 >> 2, ACPILapicReadRegister(0x350 >> 2) & ~(1 << 16));
|
||||
ACPILapicWriteRegister(0x360 >> 2, ACPILapicReadRegister(0x360 >> 2) & ~(1 << 16));
|
||||
ACPILapicWriteRegister(0x080 >> 2, 0);
|
||||
if (ACPILapicReadRegister(0x30 >> 2) & 0x80000000) ACPILapicWriteRegister(0x410 >> 2, 0);
|
||||
ACPILapicEndOfInterrupt();
|
||||
|
||||
// Configure the LAPIC's timer.
|
||||
|
||||
acpi.lapic.WriteRegister(0x3E0 >> 2, 2); // Divisor = 16
|
||||
ACPILapicWriteRegister(0x3E0 >> 2, 2); // Divisor = 16
|
||||
|
||||
// Create the processor's local storage.
|
||||
|
||||
|
@ -1130,7 +1136,7 @@ extern "C" void InterruptHandler(InterruptContext *context) {
|
|||
__sync_fetch_and_sub(&callFunctionOnAllProcessorsRemaining, 1);
|
||||
}
|
||||
|
||||
acpi.lapic.EndOfInterrupt();
|
||||
ACPILapicEndOfInterrupt();
|
||||
} else if (interrupt >= INTERRUPT_VECTOR_MSI_START && interrupt < INTERRUPT_VECTOR_MSI_START + INTERRUPT_VECTOR_MSI_COUNT && local) {
|
||||
KSpinlockAcquire(&irqHandlersLock);
|
||||
MSIHandler handler = msiHandlers[interrupt - INTERRUPT_VECTOR_MSI_START];
|
||||
|
@ -1143,7 +1149,7 @@ extern "C" void InterruptHandler(InterruptContext *context) {
|
|||
handler.callback(interrupt - INTERRUPT_VECTOR_MSI_START, handler.context);
|
||||
}
|
||||
|
||||
acpi.lapic.EndOfInterrupt();
|
||||
ACPILapicEndOfInterrupt();
|
||||
|
||||
if (local->irqSwitchThread && scheduler.started && local->schedulerReady) {
|
||||
scheduler.Yield(context);
|
||||
|
@ -1203,7 +1209,7 @@ extern "C" void InterruptHandler(InterruptContext *context) {
|
|||
GetLocalStorage()->inIRQ = false;
|
||||
}
|
||||
|
||||
acpi.lapic.EndOfInterrupt();
|
||||
ACPILapicEndOfInterrupt();
|
||||
|
||||
if (local->irqSwitchThread && scheduler.started && local->schedulerReady) {
|
||||
scheduler.Yield(context);
|
||||
|
@ -1235,7 +1241,7 @@ extern "C" bool PostContextSwitch(InterruptContext *context, MMSpace *oldAddress
|
|||
KernelLog(LOG_VERBOSE, "Arch", "executing new thread", "Executing new thread %x at %x\n", currentThread, context->rip);
|
||||
}
|
||||
|
||||
acpi.lapic.EndOfInterrupt();
|
||||
ACPILapicEndOfInterrupt();
|
||||
ContextSanityCheck(context);
|
||||
|
||||
if (ProcessorAreInterruptsEnabled()) {
|
||||
|
@ -1312,4 +1318,47 @@ EsError ArchApplyRelocation(uintptr_t type, uint8_t *buffer, uintptr_t offset, u
|
|||
return ES_SUCCESS;
|
||||
}
|
||||
|
||||
uintptr_t MMArchEarlyAllocatePage() {
|
||||
uintptr_t i = physicalMemoryRegionsIndex;
|
||||
|
||||
while (!physicalMemoryRegions[i].pageCount) {
|
||||
i++;
|
||||
|
||||
if (i == physicalMemoryRegionsCount) {
|
||||
KernelPanic("MMArchEarlyAllocatePage - Expected more pages in physical regions.\n");
|
||||
}
|
||||
}
|
||||
|
||||
PhysicalMemoryRegion *region = physicalMemoryRegions + i;
|
||||
uintptr_t returnValue = region->baseAddress;
|
||||
|
||||
region->baseAddress += K_PAGE_SIZE;
|
||||
region->pageCount--;
|
||||
physicalMemoryRegionsPagesCount--;
|
||||
physicalMemoryRegionsIndex = i;
|
||||
|
||||
return returnValue;
|
||||
}
|
||||
|
||||
uint64_t MMArchPopulatePageFrameDatabase() {
|
||||
uint64_t commitLimit = 0;
|
||||
|
||||
for (uintptr_t i = 0; i < physicalMemoryRegionsCount; i++) {
|
||||
uintptr_t base = physicalMemoryRegions[i].baseAddress >> K_PAGE_BITS;
|
||||
uintptr_t count = physicalMemoryRegions[i].pageCount;
|
||||
commitLimit += count;
|
||||
|
||||
for (uintptr_t j = 0; j < count; j++) {
|
||||
MMPhysicalInsertFreePagesNext(base + j);
|
||||
}
|
||||
}
|
||||
|
||||
physicalMemoryRegionsPagesCount = 0;
|
||||
return commitLimit;
|
||||
}
|
||||
|
||||
uintptr_t MMArchGetPhysicalMemoryHighest() {
|
||||
return physicalMemoryHighest;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -77,6 +77,7 @@ extern "C" uintptr_t ProcessorGetRBP();
|
|||
extern "C" uint64_t ProcessorReadMXCSR();
|
||||
extern "C" void ProcessorInstallTSS(uint32_t *gdt, uint32_t *tss);
|
||||
extern "C" void ProcessorAPStartup();
|
||||
extern "C" void ProcessorReset();
|
||||
|
||||
extern "C" void SSSE3Framebuffer32To24Copy(volatile uint8_t *destination, volatile uint8_t *source, size_t pixelGroups);
|
||||
extern "C" uintptr_t _KThreadTerminate;
|
||||
|
@ -98,7 +99,7 @@ struct NewProcessorStorage {
|
|||
uint32_t *gdt;
|
||||
};
|
||||
|
||||
NewProcessorStorage AllocateNewProcessorStorage(struct ACPIProcessor *archCPU);
|
||||
NewProcessorStorage AllocateNewProcessorStorage(struct ArchCPU *archCPU);
|
||||
extern "C" void SetupProcessor2(struct NewProcessorStorage *);
|
||||
uintptr_t GetBootloaderInformationOffset();
|
||||
void ArchDelay1Ms(); // Spin for approximately 1ms. Use only during initialisation. Not thread-safe.
|
||||
|
@ -108,4 +109,14 @@ uint8_t ACPIGetCenturyRegisterIndex();
|
|||
size_t ProcessorSendIPI(uintptr_t interrupt, bool nmi = false, int processorID = -1); // Returns the number of processors the IPI was *not* sent to.
|
||||
void ArchSetPCIIRQLine(uint8_t slot, uint8_t pin, uint8_t line);
|
||||
|
||||
struct InterruptContext {
|
||||
uint64_t cr2, ds;
|
||||
uint8_t fxsave[512 + 16];
|
||||
uint64_t _check, cr8;
|
||||
uint64_t r15, r14, r13, r12, r11, r10, r9, r8;
|
||||
uint64_t rbp, rdi, rsi, rdx, rcx, rbx, rax;
|
||||
uint64_t interruptNumber, errorCode;
|
||||
uint64_t rip, cs, flags, rsp, ss;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
|
@ -892,11 +892,11 @@ MMArchSafeCopy:
|
|||
mov al,0
|
||||
ret
|
||||
|
||||
[global ArchResetCPU]
|
||||
ArchResetCPU:
|
||||
[global ProcessorReset]
|
||||
ProcessorReset:
|
||||
in al,0x64
|
||||
test al,2
|
||||
jne ArchResetCPU
|
||||
jne ProcessorReset
|
||||
mov al,0xFE
|
||||
out 0x64,al
|
||||
jmp $
|
||||
|
|
Loading…
Reference in New Issue