nakst 2021-11-07 08:27:16 +00:00
parent 751b4652c1
commit a2c6737bf5
6 changed files with 62 additions and 80 deletions

View File

@ -378,6 +378,10 @@ extern "C" void InterruptHandler(InterruptContext *context) {
CPULocalStorage *local = GetLocalStorage();
uintptr_t interrupt = context->interruptNumber;
if (local && local->spinlockCount && context->cr8 != 0xE) {
KernelPanic("InterruptHandler - Local spinlockCount is %d but interrupts were enabled (%x/%x).\n", local->spinlockCount, local, context);
}
#if 0
#ifdef EARLY_DEBUGGING
#ifdef VGA_TEXT_MODE
@ -505,8 +509,8 @@ extern "C" void InterruptHandler(InterruptContext *context) {
if (local && local->spinlockCount && ((context->cr2 >= 0xFFFF900000000000 && context->cr2 < 0xFFFFF00000000000)
|| context->cr2 < 0x8000000000000000)) {
KernelPanic("HandlePageFault - Page fault occurred in critical section at %x (S = %x, B = %x, LG = %x) (CR2 = %x).\n",
context->rip, context->rsp, context->rbp, local->currentThread->lastKnownExecutionAddress, context->cr2);
KernelPanic("HandlePageFault - Page fault occurred with spinlocks active at %x (S = %x, B = %x, LG = %x, CR2 = %x, local = %x).\n",
context->rip, context->rsp, context->rbp, local->currentThread->lastKnownExecutionAddress, context->cr2, local);
}
if (!MMArchHandlePageFault(context->cr2, MM_HANDLE_PAGE_FAULT_FOR_SUPERVISOR
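Both added panics key off local->spinlockCount, which this diff uses but never defines. For context, a minimal sketch of how a per-CPU spinlock counter of this kind is typically maintained; KSpinlockAcquire, KSpinlockRelease, and CPULocalStorage are real names from this kernel, but the bodies below are assumptions for illustration, not the project's implementation.

    #include <stdint.h>

    struct CPULocalStorage {
        uintptr_t spinlockCount; // Non-zero whenever this CPU holds at least one spinlock.
    };

    struct KSpinlock {
        volatile uint8_t state;
        bool interruptsEnabled; // Interrupt state at acquisition, restored on release.
    };

    extern CPULocalStorage *GetLocalStorage();
    extern bool ProcessorAreInterruptsEnabled();
    extern void ProcessorDisableInterrupts();
    extern void ProcessorEnableInterrupts();

    void KSpinlockAcquire(KSpinlock *spinlock) {
        bool interruptsEnabled = ProcessorAreInterruptsEnabled();
        ProcessorDisableInterrupts(); // Spinlocks are held with interrupts masked...
        CPULocalStorage *local = GetLocalStorage();
        if (local) local->spinlockCount++; // ...and counted, so handlers can assert on it.
        while (__sync_lock_test_and_set(&spinlock->state, 1)); // Spin until we own the lock.
        spinlock->interruptsEnabled = interruptsEnabled;
    }

    void KSpinlockRelease(KSpinlock *spinlock) {
        CPULocalStorage *local = GetLocalStorage();
        if (local) local->spinlockCount--;
        bool restore = spinlock->interruptsEnabled; // Read before the lock can be re-taken.
        __sync_lock_release(&spinlock->state);
        if (restore) ProcessorEnableInterrupts();
    }

Under that model, an interrupt arriving while spinlockCount is non-zero means interrupts were not actually masked, which is exactly what the first panic reports; the page-fault variant additionally tolerates faults on kernel ranges that are never pageable.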

View File

@ -780,6 +780,15 @@ uint64_t ArchGetTimeMs() {
}
extern "C" bool PostContextSwitch(InterruptContext *context, MMSpace *oldAddressSpace) {
if (scheduler.lock.interruptsEnabled) {
KernelPanic("PostContextSwitch - Interrupts were enabled. (3)\n");
}
// We can only free the scheduler's spinlock when we are no longer using the stack
// from the previous thread. See DoContextSwitch.
// (Another CPU can KillThread this once it's back in activeThreads.)
KSpinlockRelease(&scheduler.lock, true);
Thread *currentThread = GetCurrentThread();
#ifdef ES_ARCH_X86_64
@ -789,13 +798,10 @@ extern "C" bool PostContextSwitch(InterruptContext *context, MMSpace *oldAddress
#endif
bool newThread = currentThread->cpuTimeSlices == 1;
LapicEndOfInterrupt();
ContextSanityCheck(context);
if (ProcessorAreInterruptsEnabled()) {
KernelPanic("PostContextSwitch - Interrupts were enabled. (1)\n");
}
ProcessorSetThreadStorage(currentThread->tlsAddress);
MMSpaceCloseReference(oldAddressSpace);
#ifdef ES_ARCH_X86_64
KernelLog(LOG_VERBOSE, "Arch", "context switch", "Context switch to %zthread %x at %x\n", newThread ? "new " : "", currentThread, context->rip);
@ -805,22 +811,13 @@ extern "C" bool PostContextSwitch(InterruptContext *context, MMSpace *oldAddress
currentThread->lastKnownExecutionAddress = context->eip;
#endif
if (scheduler.lock.interruptsEnabled) {
KernelPanic("PostContextSwitch - Interrupts were enabled. (3)\n");
}
ProcessorSetThreadStorage(currentThread->tlsAddress);
// We can only free the scheduler's spinlock when we are no longer using the stack
// from the previous thread. See DoContextSwitch.
// (Another CPU can KillThread this once it's back in activeThreads.)
KSpinlockRelease(&scheduler.lock, true);
if (ProcessorAreInterruptsEnabled()) {
KernelPanic("PostContextSwitch - Interrupts were enabled. (2)\n");
}
MMSpaceCloseReference(oldAddressSpace);
if (local->spinlockCount) {
KernelPanic("PostContextSwitch - spinlockCount is non-zero (%x).\n", local);
}
#ifdef ES_ARCH_X86_32
if (context->fromRing0) {
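The net effect of the three hunks above: the interrupts-enabled check and the scheduler-lock release move to the top of PostContextSwitch, and the duplicated block near the bottom (second panic, second ProcessorSetThreadStorage, second MMSpaceCloseReference) is deleted. A condensed, compilable sketch of the resulting order, with stub declarations added; this paraphrases the diff rather than reproducing the file:

    #include <stdint.h>

    struct InterruptContext;
    struct MMSpace;
    struct Thread { uintptr_t tlsAddress; };
    struct KSpinlock { bool interruptsEnabled; };
    struct Scheduler { KSpinlock lock; };

    extern Scheduler scheduler;
    extern void KernelPanic(const char *format, ...);
    extern void KSpinlockRelease(KSpinlock *spinlock, bool force);
    extern Thread *GetCurrentThread();
    extern void LapicEndOfInterrupt();
    extern void ProcessorSetThreadStorage(uintptr_t tlsAddress);
    extern void MMSpaceCloseReference(MMSpace *space);

    extern "C" bool PostContextSwitchSketch(InterruptContext *, MMSpace *oldAddressSpace) {
        // Entered from DoContextSwitch with scheduler.lock held and interrupts
        // off, already running on the new thread's stack.
        if (scheduler.lock.interruptsEnabled) {
            KernelPanic("PostContextSwitch - Interrupts were enabled. (3)\n");
        }

        // Release as early as possible: the old thread is back in activeThreads
        // and another CPU may KillThread it, but we no longer touch its stack.
        KSpinlockRelease(&scheduler.lock, true);

        Thread *currentThread = GetCurrentThread();

        // Everything below runs with the lock dropped and spinlockCount back at
        // zero, so work that may touch pageable memory cannot trip the new
        // page-fault assertion from the first file.
        LapicEndOfInterrupt();
        ProcessorSetThreadStorage(currentThread->tlsAddress);
        MMSpaceCloseReference(oldAddressSpace);

        return true; // The real function returns whether this is a newly started thread.
    }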
@ -841,7 +838,7 @@ extern "C" bool PostContextSwitch(InterruptContext *context, MMSpace *oldAddress
}
bool SetupInterruptRedirectionEntry(uintptr_t _line) {
KSpinlockAssertLocked(&scheduler.lock);
KSpinlockAssertLocked(&irqHandlersLock);
static uint32_t alreadySetup = 0;
@ -944,9 +941,6 @@ KMSIInformation KRegisterMSI(KIRQHandler handler, void *context, const char *cOw
}
bool KRegisterIRQ(intptr_t line, KIRQHandler handler, void *context, const char *cOwnerName, KPCIDevice *pciDevice) {
KSpinlockAcquire(&scheduler.lock);
EsDefer(KSpinlockRelease(&scheduler.lock));
if (line == -1 && !pciDevice) {
KernelPanic("KRegisterIRQ - Interrupt line is %d, and pciDevice is %x.\n", line, pciDevice);
}
@ -970,27 +964,29 @@ bool KRegisterIRQ(intptr_t line, KIRQHandler handler, void *context, const char
}
}
KSpinlockRelease(&irqHandlersLock);
bool result = true;
if (!found) {
KernelLog(LOG_ERROR, "Arch", "too many IRQ handlers", "The limit of IRQ handlers was reached (%d), and the handler for '%z' was not registered.\n",
sizeof(irqHandlers) / sizeof(irqHandlers[0]), cOwnerName);
return false;
}
KernelLog(LOG_INFO, "Arch", "register IRQ", "KRegisterIRQ - Registered IRQ %d to '%z'.\n", line, cOwnerName);
if (line != -1) {
if (!SetupInterruptRedirectionEntry(line)) {
return false;
}
} else {
SetupInterruptRedirectionEntry(9);
SetupInterruptRedirectionEntry(10);
SetupInterruptRedirectionEntry(11);
KernelLog(LOG_INFO, "Arch", "register IRQ", "KRegisterIRQ - Registered IRQ %d to '%z'.\n", line, cOwnerName);
if (line != -1) {
if (!SetupInterruptRedirectionEntry(line)) {
result = false;
}
} else {
SetupInterruptRedirectionEntry(9);
SetupInterruptRedirectionEntry(10);
SetupInterruptRedirectionEntry(11);
}
}
return true;
KSpinlockRelease(&irqHandlersLock);
return result;
}
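The restructuring also changes the lock discipline: the old code took scheduler.lock for the whole function and released irqHandlersLock before its early returns, while the new code (as the moved assertion in SetupInterruptRedirectionEntry suggests) keeps irqHandlersLock held across the redirection setup and funnels every outcome through one release and one return. A standalone sketch of that single-exit shape; none of these stub names are kernel API:

    struct StubLock {};
    void StubAcquire(StubLock *) {}
    void StubRelease(StubLock *) {}

    StubLock handlerTableLock;

    // Stand-in for SetupInterruptRedirectionEntry.
    bool StubSetupRedirection(int line) { return line >= 0; }

    bool RegisterStub(int line, bool foundFreeSlot) {
        StubAcquire(&handlerTableLock);
        bool result = true;

        if (!foundFreeSlot) {
            result = false;                    // Record failure instead of returning early...
        } else if (line != -1 && !StubSetupRedirection(line)) {
            result = false;                    // ...so the release below is never skipped.
        }

        StubRelease(&handlerTableLock);        // Exactly one unlock for the one lock above.
        return result;
    }

Early returns were tolerable while the lock was already dropped; once the release moves to the end of the function, they would leak the lock, so the single-exit shape follows.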
void ArchStartupApplicationProcessors() {

View File

@ -1096,6 +1096,10 @@ EsError CCSpaceAccess(CCSpace *cache, K_USER_BUFFER void *_buffer, EsFileOffset
copy:;
if (GetLocalStorage()->spinlockCount) {
KernelPanic("CCSpaceAccess - Spinlocks acquired.\n");
}
// Copy into/from the user's buffer.
if (buffer) {
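The new assertion guards the copy that follows it: touching a user buffer can page-fault, and servicing that fault may need to block, which is incompatible with holding a spinlock. A small illustrative helper making the same check; CopyToUserBuffer is a hypothetical name for this sketch, not this kernel's API:

    #include <stdint.h>
    #include <string.h>

    struct CPULocalStorage { uintptr_t spinlockCount; };
    extern CPULocalStorage *GetLocalStorage();
    extern void KernelPanic(const char *format, ...);

    // Hypothetical helper: assert the no-spinlock rule, then do the faultable copy.
    void CopyToUserBuffer(void *userBuffer, const void *cachedData, size_t byteCount) {
        if (GetLocalStorage()->spinlockCount) {
            // A page fault here would have to block to bring the buffer in, but
            // blocking with spinlocks held (and interrupts off) can deadlock.
            KernelPanic("CopyToUserBuffer - Spinlocks acquired.\n");
        }

        memcpy(userBuffer, cachedData, byteCount); // May fault; that is now permitted.
    }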

View File

@ -227,7 +227,7 @@ void CloseHandleToObject(void *object, KernelObjectType type, uint32_t flags) {
case KERNEL_OBJECT_THREAD: {
KSpinlockAcquire(&scheduler.lock);
Thread *thread = (Thread *) object;
if (!thread->handles) KernelPanic("CloseHandleToThread - All handles to the thread have been closed.\n");
if (!thread->handles) KernelPanic("CloseHandleToObject - All handles to thread %x have been closed.\n", thread);
thread->handles--;
bool removeThread = thread->handles == 0;
// EsPrint("Thread %d has %d handles\n", thread->id, thread->handles);

View File

@ -241,7 +241,7 @@ struct Scheduler {
LinkedList<KTimer> activeTimers;
Pool threadPool, processPool, mmSpacePool;
LinkedList<Thread> allThreads;
LinkedList<Process> allProcesses;
EsObjectID nextThreadID;
EsObjectID nextProcessID;
@ -251,7 +251,7 @@ struct Scheduler {
uint64_t timeMs;
unsigned currentProcessorID;
uint32_t currentProcessorID;
#ifdef DEBUG_BUILD
EsThreadEventLogEntry *volatile threadEventLog;
@ -375,12 +375,11 @@ void Scheduler::InsertNewThread(Thread *thread, bool addToActiveList, Process *o
EsDefer(KSpinlockRelease(&lock));
// New threads are initialised here.
thread->id = nextThreadID++;
thread->id = __sync_fetch_and_add(&nextThreadID, 1);
thread->process = owner;
owner->handles++; // Each thread owns a handle to the owner process.
// This makes sure the process isn't destroyed before all its threads have been destroyed.
// EsPrint("Open handle to process %d/%x (new thread). New handle count: %d.\n", owner->id, owner, owner->handles);
owner->handles++; // Each thread owns a handle to the owner process.
// This makes sure the process isn't destroyed before all its threads have been destroyed.
thread->item.thisItem = thread;
thread->allItem.thisItem = thread;
@ -893,10 +892,7 @@ Process *Scheduler::SpawnProcess(ProcessType processType) {
return nullptr;
}
KSpinlockAcquire(&scheduler.lock);
process->id = nextProcessID++;
KSpinlockRelease(&scheduler.lock);
process->id = __sync_fetch_and_add(&nextProcessID, 1);
process->vmm->referenceCount = 1;
process->allItem.thisItem = process;
process->handles = 1;
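Both ID allocators above drop the acquire/release pair around a plain increment in favour of a single GCC atomic builtin. __sync_fetch_and_add atomically adds to the variable and returns its previous value, so concurrent callers are guaranteed distinct IDs without serialising on scheduler.lock. A self-contained demonstration (user-space, pthreads; not kernel code):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static volatile uint64_t nextID = 1;

    void *Worker(void *) {
        for (int i = 0; i < 100000; i++) {
            __sync_fetch_and_add(&nextID, 1); // Atomic read-modify-write: no lost updates.
        }
        return nullptr;
    }

    int main() {
        pthread_t threads[4];
        for (pthread_t &thread : threads) pthread_create(&thread, nullptr, Worker, nullptr);
        for (pthread_t &thread : threads) pthread_join(thread, nullptr);
        // With a plain nextID++ this total would usually come up short.
        printf("nextID = %llu (expected %d)\n", (unsigned long long) nextID, 4 * 100000 + 1);
        return 0;
    }

The same swap appears in CreateProcessorThreads below, where the bounds check then reads the value this CPU was handed (local->processorID) rather than the shared counter, so the comparison cannot race with other processors incrementing it.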
@ -958,18 +954,12 @@ void Scheduler::CreateProcessorThreads(CPULocalStorage *local) {
idleThread->terminatableState = THREAD_IN_SYSCALL;
idleThread->cName = "Idle";
local->currentThread = local->idleThread = idleThread;
local->processorID = __sync_fetch_and_add(&currentProcessorID, 1);
KSpinlockAcquire(&lock);
if (currentProcessorID >= K_MAX_PROCESSORS) {
KernelPanic("Scheduler::CreateProcessorThreads - Maximum processor count (%d) exceeded.\n", currentProcessorID);
if (local->processorID >= K_MAX_PROCESSORS) {
KernelPanic("Scheduler::CreateProcessorThreads - Maximum processor count (%d) exceeded.\n", local->processorID);
}
local->processorID = currentProcessorID++;
// Force release the lock because we've changed our currentThread value.
KSpinlockRelease(&lock);
InsertNewThread(idleThread, false, kernelProcess);
local->asyncTaskThread = SpawnThread("AsyncTasks", (uintptr_t) AsyncTaskThread, 0, SPAWN_THREAD_MANUALLY_ACTIVATED);
@ -1204,10 +1194,6 @@ void Scheduler::Yield(InterruptContext *context) {
return;
}
if (local->spinlockCount) {
KernelPanic("Scheduler::Yield - Spinlocks acquired while attempting to yield (2).\n");
}
if (local->spinlockCount) {
KernelPanic("Scheduler::Yield - Spinlocks acquired while attempting to yield.\n");
}

View File

@ -94,20 +94,6 @@ void KSpinlockAssertLocked(KSpinlock *spinlock) {
}
}
Thread *AttemptMutexAcquisition(KMutex *mutex, Thread *currentThread) {
KSpinlockAcquire(&scheduler.lock);
Thread *old = mutex->owner;
if (!old) {
mutex->owner = currentThread;
}
KSpinlockRelease(&scheduler.lock);
return old;
}
#ifdef DEBUG_BUILD
bool _KMutexAcquire(KMutex *mutex, const char *cMutexString, const char *cFile, int line) {
#else
@ -140,7 +126,13 @@ bool KMutexAcquire(KMutex *mutex) {
KernelPanic("KMutex::Acquire - Trying to acquire a mutex while interrupts are disabled.\n");
}
while (AttemptMutexAcquisition(mutex, currentThread)) {
while (true) {
KSpinlockAcquire(&scheduler.lock);
Thread *old = mutex->owner;
if (!old) mutex->owner = currentThread;
KSpinlockRelease(&scheduler.lock);
if (!old) break;
__sync_synchronize();
if (GetLocalStorage() && GetLocalStorage()->schedulerReady) {
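AttemptMutexAcquisition is folded into the loop, so each retry re-tests mutex->owner under scheduler.lock, and a full memory barrier now separates the failed attempt from the scheduler-readiness check. As background, __sync_synchronize() is GCC's full hardware memory barrier; the classic publication pattern it enforces, in a standalone user-space form (illustrative only):

    #include <pthread.h>
    #include <stdio.h>

    static int payload;
    static volatile int ready;

    void *Producer(void *) {
        payload = 42;         // 1. Write the data...
        __sync_synchronize(); // 2. ...fence so the write is visible first...
        ready = 1;            // 3. ...then publish the flag.
        return nullptr;
    }

    void *Consumer(void *) {
        while (!ready) {}     // Spin until published.
        __sync_synchronize(); // Matching fence on the reader side.
        printf("payload = %d\n", payload); // Sees 42, never a stale value.
        return nullptr;
    }

    int main() {
        pthread_t producer, consumer;
        pthread_create(&producer, nullptr, Producer, nullptr);
        pthread_create(&consumer, nullptr, Consumer, nullptr);
        pthread_join(producer, nullptr);
        pthread_join(consumer, nullptr);
        return 0;
    }

The barrier added in KWriterLockTake below serves the same purpose: the stores to blocking.writerLock and blocking.writerLockType must be visible before any other CPU can act on the thread's wait state.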
@ -440,6 +432,7 @@ bool KWriterLockTake(KWriterLock *lock, bool write, bool poll) {
if (thread) {
thread->blocking.writerLock = lock;
thread->blocking.writerLockType = write;
__sync_synchronize();
}
while (true) {
@ -639,15 +632,14 @@ void Scheduler::WaitMutex(KMutex *mutex) {
KernelPanic("Scheduler::WaitMutex - Attempting to wait on a mutex in a non-active thread.\n");
}
KSpinlockAcquire(&lock);
thread->state = THREAD_WAITING_MUTEX;
thread->blocking.mutex = mutex;
__sync_synchronize();
thread->state = THREAD_WAITING_MUTEX;
KSpinlockAcquire(&lock);
// Is the owner of this mutex executing?
// If not, there's no point in spinning on it.
bool spin = mutex && mutex->owner && mutex->owner->executing;
KSpinlockRelease(&lock);
if (!spin && thread->blocking.mutex->owner) {
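The reorder in WaitMutex is the same publication pattern: blocking.mutex must be globally visible before thread->state, so that any CPU observing THREAD_WAITING_MUTEX can safely follow blocking.mutex (as the dereference on the last line above does). A condensed model of the two sides; the names mirror the diff, but the bodies are illustrative:

    struct KMutex;

    enum ThreadState { THREAD_ACTIVE, THREAD_WAITING_MUTEX };

    struct Thread {
        volatile ThreadState state;
        struct { KMutex *volatile mutex; } blocking;
    };

    // Waiter side (Scheduler::WaitMutex): publish the wait reason before the state.
    void BeginWaitMutex(Thread *thread, KMutex *mutex) {
        thread->blocking.mutex = mutex;       // 1. Record what we are about to wait on.
        __sync_synchronize();                 // 2. Fence: the store above is visible first.
        thread->state = THREAD_WAITING_MUTEX; // 3. Only now can other CPUs see the state.
    }

    // Unblocker side (e.g. the mutex release path): the state implies blocking.mutex is valid.
    bool IsWaitingOn(Thread *thread, KMutex *mutex) {
        if (thread->state != THREAD_WAITING_MUTEX) return false;
        __sync_synchronize();
        return thread->blocking.mutex == mutex; // Safe: never a stale or garbage pointer.
    }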