rename scheduler lock to dispatch lock

nakst 2021-11-08 21:49:26 +00:00
parent 455a213349
commit 85909972dc
5 changed files with 52 additions and 44 deletions


@@ -780,14 +780,14 @@ uint64_t ArchGetTimeMs() {
}
extern "C" bool PostContextSwitch(InterruptContext *context, MMSpace *oldAddressSpace) {
-if (scheduler.lock.interruptsEnabled) {
+if (scheduler.dispatchSpinlock.interruptsEnabled) {
KernelPanic("PostContextSwitch - Interrupts were enabled. (3)\n");
}
// We can only free the scheduler's spinlock when we are no longer using the stack
// from the previous thread. See DoContextSwitch.
// (Another CPU can KillThread this once it's back in activeThreads.)
-KSpinlockRelease(&scheduler.lock, true);
+KSpinlockRelease(&scheduler.dispatchSpinlock, true);
Thread *currentThread = GetCurrentThread();
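
The release above, not the acquire, is the delicate step: the dispatch spinlock stays held across the switch away from the old thread's stack, and may only be dropped once that stack is no longer in use, because another CPU that finds the thread back in activeThreads is free to kill it and reclaim the stack immediately. A minimal user-space model of that ordering constraint, with std::atomic<bool> standing in for KSpinlock and every other name invented for illustration:

    #include <atomic>

    std::atomic<bool> dispatchLock{false};   // stand-in for scheduler.dispatchSpinlock

    struct OldThread { char *kernelStack; };

    // Model of PostContextSwitch: the lock taken before the switch is
    // released only once the old thread's stack is out of use.
    void contextSwitchTail(OldThread *previous) {
        // Code up to here may still be running on previous->kernelStack, so
        // the lock must still be held: another CPU that saw 'previous' in
        // activeThreads could otherwise free that stack under us.
        (void) previous;
        dispatchLock.store(false, std::memory_order_release);   // now safe
    }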


@@ -208,7 +208,15 @@ bool OpenHandleToObject(void *object, KernelObjectType type, uint32_t flags) {
void CloseHandleToObject(void *object, KernelObjectType type, uint32_t flags) {
switch (type) {
case KERNEL_OBJECT_PROCESS: {
-CloseHandleToProcess(object);
+Process *process = (Process *) object;
+uintptr_t previous = __sync_fetch_and_sub(&process->handles, 1);
+KernelLog(LOG_VERBOSE, "Scheduler", "close process handle", "Closed handle to process %d; %d handles remain.\n", process->id, process->handles);
+if (previous == 0) {
+KernelPanic("CloseHandleToProcess - All handles to process %x have been closed.\n", process);
+} else if (previous == 1) {
+scheduler.RemoveProcess(process);
+}
} break;
case KERNEL_OBJECT_THREAD: {
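
The inlined replacement for CloseHandleToProcess is a standard decrement-and-check: __sync_fetch_and_sub returns the handle count before the decrement, so previous == 0 can only mean a double close (hence the panic), while previous == 1 identifies exactly one caller as having closed the last handle, and that caller alone tears the process down. A self-contained model of the pattern; Process, RemoveProcess, and CloseHandle here are illustrative stand-ins, not the kernel's definitions:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    struct Process {
        std::atomic<uintptr_t> handles{1};   // created with one open handle
    };

    void RemoveProcess(Process *process) { delete process; }   // stand-in teardown

    void CloseHandle(Process *process) {
        // fetch_sub returns the pre-decrement value, like __sync_fetch_and_sub.
        uintptr_t previous = process->handles.fetch_sub(1);
        assert(previous != 0 && "handle count underflow");   // the kernel panics here
        if (previous == 1) {
            // We saw the count go 1 -> 0; no other closer can have, so
            // teardown needs no further locking.
            RemoveProcess(process);
        }
    }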


@@ -519,7 +519,7 @@ namespace POSIX {
return -ENOMEM;
}
-KSpinlockAcquire(&scheduler.lock);
+KSpinlockAcquire(&scheduler.dispatchSpinlock);
process->posixForking = false;
@@ -527,7 +527,7 @@
KEventSet(&process->killedEvent, true);
}
-KSpinlockRelease(&scheduler.lock);
+KSpinlockRelease(&scheduler.dispatchSpinlock);
EsHeapFree(path, 0, K_FIXED);
CloseHandleToObject(process->executableMainThread, KERNEL_OBJECT_THREAD);


@@ -229,7 +229,7 @@ struct Scheduler {
// Variables:
-KSpinlock lock; // The general lock. TODO Break this up!
+KSpinlock dispatchSpinlock; // For accessing synchronisation objects, thread states, scheduling lists, etc. TODO Break this up!
KMutex allThreadsMutex; // For accessing the allThreads list.
KMutex allProcessesMutex; // For accessing the allProcesses list.
KSpinlock activeTimersSpinlock; // For accessing the activeTimers lists.
@@ -283,7 +283,7 @@ void KRegisterAsyncTask(KAsyncTask *task, KAsyncTaskCallback callback) {
}
int8_t Scheduler::GetThreadEffectivePriority(Thread *thread) {
-KSpinlockAssertLocked(&lock);
+KSpinlockAssertLocked(&dispatchSpinlock);
for (int8_t i = 0; i < thread->priority; i++) {
if (thread->blockedThreadPriorities[i]) {
@@ -305,7 +305,7 @@ void Scheduler::AddActiveThread(Thread *thread, bool start) {
return;
}
-KSpinlockAssertLocked(&lock);
+KSpinlockAssertLocked(&dispatchSpinlock);
if (thread->state != THREAD_ACTIVE) {
KernelPanic("Scheduler::AddActiveThread - Thread %d not active\n", thread->id);
@@ -343,7 +343,7 @@ void Scheduler::MaybeUpdateActiveList(Thread *thread) {
KernelPanic("Scheduler::MaybeUpdateActiveList - Trying to update the active list of a non-normal thread %x.\n", thread);
}
-KSpinlockAssertLocked(&lock);
+KSpinlockAssertLocked(&dispatchSpinlock);
if (thread->state != THREAD_ACTIVE || thread->executing) {
// The thread is not currently in an active list,
@@ -397,9 +397,9 @@ void Scheduler::InsertNewThread(Thread *thread, bool addToActiveList, Process *o
if (addToActiveList) {
// Add the thread to the start of the active thread list to make sure that it runs immediately.
-KSpinlockAcquire(&lock);
+KSpinlockAcquire(&dispatchSpinlock);
AddActiveThread(thread, true);
-KSpinlockRelease(&lock);
+KSpinlockRelease(&dispatchSpinlock);
} else {
// Idle and asynchronous task threads don't need to be added to a scheduling list.
}
@@ -522,7 +522,7 @@ void KillProcess(Process *process) {
KMutexRelease(&scheduler.allProcessesMutex);
-KSpinlockAcquire(&scheduler.lock);
+KSpinlockAcquire(&scheduler.dispatchSpinlock);
process->allThreadsTerminated = true;
@@ -542,7 +542,7 @@ void KillProcess(Process *process) {
KEventSet(&process->killedEvent, true);
}
-KSpinlockRelease(&scheduler.lock);
+KSpinlockRelease(&scheduler.dispatchSpinlock);
// There are no threads left in this process.
// We should destroy the handle table at this point.
@@ -647,7 +647,7 @@ void Scheduler::TerminateThread(Thread *thread) {
// Else, is the user waiting on a mutex/event?
// If we aren't currently executing the thread, unblock the thread.
-KSpinlockAcquire(&scheduler.lock);
+KSpinlockAcquire(&scheduler.dispatchSpinlock);
bool yield = false;
@@ -666,7 +666,7 @@ void Scheduler::TerminateThread(Thread *thread) {
// Mark the thread as terminatable.
thread->terminatableState = THREAD_TERMINATABLE;
-KSpinlockRelease(&scheduler.lock);
+KSpinlockRelease(&scheduler.dispatchSpinlock);
// We cannot return to the previous function as it expects to be killed.
ProcessorFakeTimerInterrupt();
@@ -713,7 +713,7 @@ void Scheduler::TerminateThread(Thread *thread) {
done:;
-KSpinlockRelease(&scheduler.lock);
+KSpinlockRelease(&scheduler.dispatchSpinlock);
if (yield) ProcessorFakeTimerInterrupt(); // Process the asynchronous task.
}
@@ -829,16 +829,16 @@ bool Process::Start(char *imagePath, size_t imagePathLength) {
bool Process::StartWithNode(KNode *node) {
// Make sure nobody has tried to start the process.
-KSpinlockAcquire(&scheduler.lock);
+KSpinlockAcquire(&scheduler.dispatchSpinlock);
if (executableStartRequest) {
-KSpinlockRelease(&scheduler.lock);
+KSpinlockRelease(&scheduler.dispatchSpinlock);
return false;
}
executableStartRequest = true;
-KSpinlockRelease(&scheduler.lock);
+KSpinlockRelease(&scheduler.dispatchSpinlock);
// Get the name of the process from the node.
@@ -924,14 +924,14 @@ Process *Scheduler::SpawnProcess(ProcessType processType) {
}
void Scheduler::SetTemporaryAddressSpace(MMSpace *space) {
-KSpinlockAcquire(&lock);
+KSpinlockAcquire(&dispatchSpinlock);
Thread *thread = GetCurrentThread();
MMSpace *oldSpace = thread->temporaryAddressSpace ?: kernelMMSpace;
thread->temporaryAddressSpace = space;
MMSpace *newSpace = space ?: kernelMMSpace;
MMSpaceOpenReference(newSpace);
ProcessorSetAddressSpace(&newSpace->data);
-KSpinlockRelease(&lock);
+KSpinlockRelease(&dispatchSpinlock);
MMSpaceCloseReference(oldSpace);
}
@@ -1068,7 +1068,7 @@ void Scheduler::CrashProcess(Process *process, EsCrashReason *crashReason) {
}
void Scheduler::PauseThread(Thread *thread, bool resume) {
-KSpinlockAcquire(&lock);
+KSpinlockAcquire(&dispatchSpinlock);
if (thread->paused == !resume) {
return;
@@ -1080,7 +1080,7 @@ void Scheduler::PauseThread(Thread *thread, bool resume) {
if (thread->state == THREAD_ACTIVE) {
if (thread->executing) {
if (thread == GetCurrentThread()) {
-KSpinlockRelease(&lock);
+KSpinlockRelease(&dispatchSpinlock);
// Yield.
ProcessorFakeTimerInterrupt();
@@ -1109,7 +1109,7 @@ void Scheduler::PauseThread(Thread *thread, bool resume) {
AddActiveThread(thread, false);
}
-KSpinlockRelease(&lock);
+KSpinlockRelease(&dispatchSpinlock);
}
void Scheduler::PauseProcess(Process *process, bool resume) {
@@ -1126,7 +1126,7 @@ void Scheduler::PauseProcess(Process *process, bool resume) {
}
Thread *Scheduler::PickThread(CPULocalStorage *local) {
-KSpinlockAssertLocked(&lock);
+KSpinlockAssertLocked(&dispatchSpinlock);
if ((local->asyncTaskList.first || local->inAsyncTask) && local->asyncTaskThread->state == THREAD_ACTIVE) {
// If the asynchronous task thread for this processor isn't blocked, and has tasks to process, execute it.
@@ -1157,9 +1157,9 @@ void Scheduler::Yield(InterruptContext *context) {
}
ProcessorDisableInterrupts(); // We don't want interrupts to get reenabled after the context switch.
-KSpinlockAcquire(&lock);
+KSpinlockAcquire(&dispatchSpinlock);
-if (lock.interruptsEnabled) {
+if (dispatchSpinlock.interruptsEnabled) {
KernelPanic("Scheduler::Yield - Interrupts were enabled when scheduler lock was acquired.\n");
}
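
Yield disables interrupts before taking the dispatch spinlock, and the spinlock records the interrupt state it observed at acquisition, so interruptsEnabled must read false here; reading true would mean the lock was taken with interrupts still enabled, which would let a timer interrupt re-enter the scheduler on top of itself. A sketch of that bookkeeping, assuming a spinlock that saves the pre-acquire interrupt flag (only the field name comes from the diff; the rest is simplified):

    #include <atomic>

    struct SpinlockModel {
        std::atomic<bool> locked{false};
        bool interruptsEnabled;   // interrupt state captured at acquire time
    };

    // On Yield's path interrupts are already off, so interruptsWereEnabled
    // is false; the panic above fires if the stored flag reads true.
    void acquireModel(SpinlockModel *lock, bool interruptsWereEnabled) {
        while (lock->locked.exchange(true, std::memory_order_acquire)) {}
        lock->interruptsEnabled = interruptsWereEnabled;
    }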


@@ -127,10 +127,10 @@ bool KMutexAcquire(KMutex *mutex) {
}
while (true) {
-KSpinlockAcquire(&scheduler.lock);
+KSpinlockAcquire(&scheduler.dispatchSpinlock);
Thread *old = mutex->owner;
if (!old) mutex->owner = currentThread;
-KSpinlockRelease(&scheduler.lock);
+KSpinlockRelease(&scheduler.dispatchSpinlock);
if (!old) break;
__sync_synchronize();
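
The loop above holds the dispatch spinlock only long enough to make reading mutex->owner and conditionally claiming it a single atomic step; the waiting itself happens with the spinlock released. A compilable model of that test-and-claim step, with stand-in types:

    #include <atomic>

    std::atomic<bool> dispatchLock{false};   // stand-in for the dispatch spinlock

    struct MutexModel { void *owner = nullptr; };

    // Returns true if ownership was taken; the caller loops/blocks otherwise.
    bool tryClaim(MutexModel *mutex, void *self) {
        while (dispatchLock.exchange(true, std::memory_order_acquire)) {}
        void *old = mutex->owner;
        if (!old) mutex->owner = self;   // claim only if currently free
        dispatchLock.store(false, std::memory_order_release);
        return old == nullptr;
    }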
@@ -193,7 +193,7 @@ void KMutexRelease(KMutex *mutex) {
KMutexAssertLocked(mutex);
Thread *currentThread = GetCurrentThread();
-KSpinlockAcquire(&scheduler.lock);
+KSpinlockAcquire(&scheduler.dispatchSpinlock);
#ifdef DEBUG_BUILD
// EsPrint("$%x:%x:0\n", owner, id);
@@ -211,7 +211,7 @@ void KMutexRelease(KMutex *mutex) {
scheduler.NotifyObject(&mutex->blockedThreads, true, currentThread);
}
-KSpinlockRelease(&scheduler.lock);
+KSpinlockRelease(&scheduler.dispatchSpinlock);
__sync_synchronize();
#ifdef DEBUG_BUILD
@@ -312,9 +312,9 @@ bool KEventSet(KEvent *event, bool schedulerAlreadyLocked, bool maybeAlreadySet)
}
if (!schedulerAlreadyLocked) {
-KSpinlockAcquire(&scheduler.lock);
+KSpinlockAcquire(&scheduler.dispatchSpinlock);
} else {
-KSpinlockAssertLocked(&scheduler.lock);
+KSpinlockAssertLocked(&scheduler.dispatchSpinlock);
}
volatile bool unblockedThreads = false;
@@ -332,7 +332,7 @@ bool KEventSet(KEvent *event, bool schedulerAlreadyLocked, bool maybeAlreadySet)
}
if (!schedulerAlreadyLocked) {
-KSpinlockRelease(&scheduler.lock);
+KSpinlockRelease(&scheduler.dispatchSpinlock);
}
return unblockedThreads;
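
The schedulerAlreadyLocked parameter is the usual shape for a function callable both with and without the lock held: acquire it only if the caller did not, assert it otherwise, and release only what was acquired. A self-contained sketch, with hypothetical Acquire/Release/AssertLocked helpers over a toy lock:

    #include <atomic>
    #include <cassert>

    struct Lock { std::atomic<bool> locked{false}; };

    void Acquire(Lock *lock) { while (lock->locked.exchange(true, std::memory_order_acquire)) {} }
    void Release(Lock *lock) { lock->locked.store(false, std::memory_order_release); }
    void AssertLocked(Lock *lock) { assert(lock->locked.load()); }

    Lock dispatchLock;

    void setEventModel(bool alreadyLocked) {
        if (!alreadyLocked) Acquire(&dispatchLock);
        else AssertLocked(&dispatchLock);   // caller holds it; re-acquiring would deadlock
        // ... unblock any waiting threads under the lock ...
        if (!alreadyLocked) Release(&dispatchLock);
    }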
@@ -396,7 +396,7 @@ void KWriterLockAssertExclusive(KWriterLock *lock) {
}
void KWriterLockReturn(KWriterLock *lock, bool write) {
-KSpinlockAcquire(&scheduler.lock);
+KSpinlockAcquire(&scheduler.dispatchSpinlock);
if (lock->state == -1) {
if (!write) {
@@ -418,7 +418,7 @@ void KWriterLockReturn(KWriterLock *lock, bool write) {
scheduler.NotifyObject(&lock->blockedThreads, true);
}
-KSpinlockRelease(&scheduler.lock);
+KSpinlockRelease(&scheduler.dispatchSpinlock);
}
bool KWriterLockTake(KWriterLock *lock, bool write, bool poll) {
@@ -436,7 +436,7 @@ bool KWriterLockTake(KWriterLock *lock, bool write, bool poll) {
}
while (true) {
-KSpinlockAcquire(&scheduler.lock);
+KSpinlockAcquire(&scheduler.dispatchSpinlock);
if (write) {
if (lock->state == 0) {
@@ -453,7 +453,7 @@ bool KWriterLockTake(KWriterLock *lock, bool write, bool poll) {
}
}
-KSpinlockRelease(&scheduler.lock);
+KSpinlockRelease(&scheduler.dispatchSpinlock);
if (poll || done) {
break;
@@ -491,11 +491,11 @@ void KWriterLockTakeMultiple(KWriterLock **locks, size_t lockCount, bool write)
}
void KWriterLockConvertExclusiveToShared(KWriterLock *lock) {
-KSpinlockAcquire(&scheduler.lock);
+KSpinlockAcquire(&scheduler.dispatchSpinlock);
KWriterLockAssertExclusive(lock);
lock->state = 1;
scheduler.NotifyObject(&lock->blockedThreads, true);
-KSpinlockRelease(&scheduler.lock);
+KSpinlockRelease(&scheduler.dispatchSpinlock);
}
#if 0
@@ -638,11 +638,11 @@ void Scheduler::WaitMutex(KMutex *mutex) {
__sync_synchronize();
thread->state = THREAD_WAITING_MUTEX;
-KSpinlockAcquire(&lock);
+KSpinlockAcquire(&dispatchSpinlock);
// Is the owner of this mutex executing?
// If not, there's no point in spinning on it.
bool spin = mutex && mutex->owner && mutex->owner->executing;
-KSpinlockRelease(&lock);
+KSpinlockRelease(&dispatchSpinlock);
if (!spin && thread->blocking.mutex->owner) {
ProcessorFakeTimerInterrupt();
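
The snapshot above is an adaptive-spinning heuristic: spinning on a mutex only pays off while its owner is executing on another CPU, because only a running owner can release it soon; if the owner is itself blocked or preempted, the waiter yields immediately (ProcessorFakeTimerInterrupt) rather than burn cycles. A tiny model of the decision, with stand-in types; in the kernel the two fields are read under the dispatch spinlock, so they cannot change mid-check:

    struct ThreadModel { bool executing; };
    struct MutexModel { ThreadModel *owner; };

    // True if a short spin is worthwhile; false means yield right away.
    bool worthSpinning(const MutexModel *mutex) {
        return mutex && mutex->owner && mutex->owner->executing;
    }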
@@ -711,7 +711,7 @@ uintptr_t KWaitEvents(KEvent **events, size_t count) {
}
void Scheduler::UnblockThread(Thread *unblockedThread, Thread *previousMutexOwner) {
-KSpinlockAssertLocked(&lock);
+KSpinlockAssertLocked(&dispatchSpinlock);
if (unblockedThread->state == THREAD_WAITING_MUTEX) {
if (unblockedThread->item.list) {
@@ -778,7 +778,7 @@ void Scheduler::NotifyObject(LinkedList<Thread> *blockedThreads, bool unblockAll, Thread *previousMutexOwne
}
void Scheduler::NotifyObject(LinkedList<Thread> *blockedThreads, bool unblockAll, Thread *previousMutexOwner) {
-KSpinlockAssertLocked(&lock);
+KSpinlockAssertLocked(&dispatchSpinlock);
LinkedItem<Thread> *unblockedItem = blockedThreads->firstItem;