simplify KEventSet

nakst 2021-11-08 21:59:42 +00:00
parent 85909972dc
commit 97ae44396b
13 changed files with 72 additions and 81 deletions

View File

@ -364,7 +364,7 @@ bool AHCIController::HandleIRQ() {
}
port->runningCommands = 0;
- KEventSet(&port->commandSlotsAvailable, false, true /* maybe already set */);
+ KEventSet(&port->commandSlotsAvailable, true /* maybe already set */);
// Restart command processing.
@ -392,7 +392,7 @@ bool AHCIController::HandleIRQ() {
port->commandContexts[j]->End(true /* success */);
port->commandContexts[j] = nullptr;
- KEventSet(&port->commandSlotsAvailable, false, true /* maybe already set */);
+ KEventSet(&port->commandSlotsAvailable, true /* maybe already set */);
port->runningCommands &= ~(1 << j);
commandCompleted = true;

View File

@ -280,7 +280,7 @@ bool Controller::HandleIRQ() {
if (cause & (1 << 2)) {
KernelLog(LOG_INFO, "I8254x", "link status change", "Link is now %z.\n",
(RD_REGISTER_STATUS() & (1 << 1)) ? "up" : "down");
- KEventSet(&receiveEvent, false, true);
+ KEventSet(&receiveEvent, true);
}
if (cause & (1 << 6)) {
@ -288,7 +288,7 @@ bool Controller::HandleIRQ() {
}
if (cause & ((1 << 6) | (1 << 7) | (1 << 4))) {
- KEventSet(&receiveEvent, false, true);
+ KEventSet(&receiveEvent, true);
}
return true;

View File

@ -433,7 +433,7 @@ bool NVMeController::HandleIRQ() {
__sync_synchronize();
ioSubmissionQueueHead = *(uint16_t *) (ioCompletionQueue + ioCompletionQueueHead * COMPLETION_QUEUE_ENTRY_BYTES + 8);
- KEventSet(&ioSubmissionQueueNonFull, false, true);
+ KEventSet(&ioSubmissionQueueNonFull, true);
// Advance the queue head.

View File

@ -685,7 +685,7 @@ bool XHCIController::HandleIRQ() {
KernelLog(LOG_INFO, "xHCI", "port enabled", "Port %d has been enabled.\n", port);
ports[port].statusChangeEvent = true;
ports[port].enabled = true;
- KEventSet(&portStatusChangeEvent, false, true);
+ KEventSet(&portStatusChangeEvent, true);
} else if (ports[port].usb2 && (linkState == 7 || linkState == 4) && (~status & (1 << 4))) {
KernelLog(LOG_INFO, "xHCI", "port reset", "Attempting to reset USB 2 port %d... (1)\n", port);
WR_REGISTER_PORTSC(port, (status & (1 << 9)) | (1 << 4));
@ -693,7 +693,7 @@ bool XHCIController::HandleIRQ() {
KernelLog(LOG_INFO, "xHCI", "port detach", "Device detached from port %d.\n", port);
ports[port].statusChangeEvent = true;
ports[port].enabled = false;
- KEventSet(&portStatusChangeEvent, false, true);
+ KEventSet(&portStatusChangeEvent, true);
}
KSpinlockRelease(&portResetSpinlock);

View File

@ -247,7 +247,7 @@ void CCWriteSectionPrepare(CCActiveSection *section) {
section->accessors = 1;
if (!activeSectionManager.modifiedList.count) KEventReset(&activeSectionManager.modifiedNonEmpty);
if (activeSectionManager.modifiedList.count < CC_MODIFIED_GETTING_FULL) KEventReset(&activeSectionManager.modifiedGettingFull);
- KEventSet(&activeSectionManager.modifiedNonFull, false, true);
+ KEventSet(&activeSectionManager.modifiedNonFull, true);
}
void CCWriteSection(CCActiveSection *section) {
@ -286,7 +286,7 @@ void CCWriteSection(CCActiveSection *section) {
EsMemoryZero(section->modifiedPages, sizeof(section->modifiedPages));
__sync_synchronize();
KEventSet(&section->writeCompleteEvent);
- KEventSet(&section->cache->writeComplete, false, true);
+ KEventSet(&section->cache->writeComplete, true);
if (!section->accessors) {
if (section->loading) KernelPanic("CCSpaceAccess - Active section %x with no accessors is loading.", section);
@ -378,10 +378,10 @@ void CCActiveSectionReturnToLists(CCActiveSection *section, bool writeBack) {
}
if (activeSectionManager.modifiedList.count >= CC_MODIFIED_GETTING_FULL) {
- KEventSet(&activeSectionManager.modifiedGettingFull, false, true);
+ KEventSet(&activeSectionManager.modifiedGettingFull, true);
}
- KEventSet(&activeSectionManager.modifiedNonEmpty, false, true);
+ KEventSet(&activeSectionManager.modifiedNonEmpty, true);
activeSectionManager.modifiedList.InsertEnd(&section->listItem);
} else {

View File

@ -1023,7 +1023,7 @@ void FSUnmountFileSystem(uintptr_t argument) {
KernelLog(LOG_INFO, "FS", "unmount complete", "Unmounted file system %x.\n", fileSystem);
KDeviceCloseHandle(fileSystem);
__sync_fetch_and_sub(&fs.fileSystemsUnmounting, 1);
- KEventSet(&fs.fileSystemUnmounted, false, true);
+ KEventSet(&fs.fileSystemUnmounted, true);
}
//////////////////////////////////////////

View File

@ -312,11 +312,11 @@ GlobalData *globalData; // Shared with all processes.
void MMUpdateAvailablePageCount(bool increase) {
if (MM_AVAILABLE_PAGES() >= MM_CRITICAL_AVAILABLE_PAGES_THRESHOLD) {
- KEventSet(&pmm.availableNotCritical, false, true);
+ KEventSet(&pmm.availableNotCritical, true);
KEventReset(&pmm.availableCritical);
} else {
KEventReset(&pmm.availableNotCritical);
- KEventSet(&pmm.availableCritical, false, true);
+ KEventSet(&pmm.availableCritical, true);
if (!increase) {
KernelLog(LOG_ERROR, "Memory", "critical page limit hit",
@ -327,7 +327,7 @@ void MMUpdateAvailablePageCount(bool increase) {
if (MM_AVAILABLE_PAGES() >= MM_LOW_AVAILABLE_PAGES_THRESHOLD) {
KEventReset(&pmm.availableLow);
} else {
- KEventSet(&pmm.availableLow, false, true);
+ KEventSet(&pmm.availableLow, true);
}
}
@ -372,7 +372,7 @@ void MMPhysicalInsertFreePagesNext(uintptr_t page) {
void MMPhysicalInsertFreePagesEnd() {
if (pmm.countFreePages > MM_ZERO_PAGE_THRESHOLD) {
- KEventSet(&pmm.zeroPageEvent, false, true);
+ KEventSet(&pmm.zeroPageEvent, true);
}
MMUpdateAvailablePageCount(true);
@ -1183,7 +1183,7 @@ bool MMCommit(uint64_t bytes, bool fixed) {
}
if (MM_OBJECT_CACHE_SHOULD_TRIM()) {
- KEventSet(&pmm.trimObjectCaches, false, true);
+ KEventSet(&pmm.trimObjectCaches, true);
}
} else {
// We haven't started tracking commit counts yet.
@ -2094,7 +2094,7 @@ void MMObjectCacheInsert(MMObjectCache *cache, MMObjectCacheItem *item) {
__sync_fetch_and_add(&pmm.approximateTotalObjectCacheBytes, cache->averageObjectBytes);
if (MM_OBJECT_CACHE_SHOULD_TRIM()) {
- KEventSet(&pmm.trimObjectCaches, false, true);
+ KEventSet(&pmm.trimObjectCaches, true);
}
KSpinlockRelease(&cache->lock);

View File

@ -273,7 +273,7 @@ struct KEvent { // Waiting and notifying. Can wait on multiple at once. Can be s
volatile size_t handles;
};
- bool KEventSet(KEvent *event, bool schedulerAlreadyLocked = false, bool maybeAlreadySet = false);
+ bool KEventSet(KEvent *event, bool maybeAlreadySet = false);
void KEventReset(KEvent *event);
bool KEventPoll(KEvent *event); // TODO Remove this! Currently it is only used by KAudioFillBuffersFromMixer.
bool KEventWait(KEvent *event, uint64_t timeoutMs = ES_WAIT_NO_TIMEOUT); // See KWaitEvents to wait for multiple events. Returns false if the wait timed out.
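The declaration change above is the core of the commit: the schedulerAlreadyLocked parameter is gone and only the maybeAlreadySet hint remains, because KEventSet now always takes the dispatch spinlock internally. A minimal call-site sketch under that assumption; exampleEvent is a hypothetical event used purely for illustration and is not part of the diff:

	KEvent exampleEvent = {};

	// Old form: KEventSet(&exampleEvent, false /* scheduler not locked */, true /* maybe already set */);
	// New form: only the "maybe already set" hint is passed.
	KEventSet(&exampleEvent, true /* maybe already set */);

	// Plain KEventSet(&exampleEvent) is reserved for events known to be reset;
	// setting an already-set event without the hint logs an error
	// (see the KEventSet implementation later in the diff).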

View File

@ -276,7 +276,7 @@ void CloseHandleToObject(void *object, KernelObjectType type, uint32_t flags) {
if (!previous) KernelPanic("CloseHandleToObject - Window %x has no handles.\n", window);
if (previous == 2) {
- KEventSet(&windowManager.windowsToCloseEvent, false, true /* maybe already set */);
+ KEventSet(&windowManager.windowsToCloseEvent, true /* maybe already set */);
} else if (previous == 1) {
window->Destroy();
}
@ -288,7 +288,7 @@ void CloseHandleToObject(void *object, KernelObjectType type, uint32_t flags) {
if (!previous) KernelPanic("CloseHandleToObject - EmbeddedWindow %x has no handles.\n", window);
if (previous == 2) {
- KEventSet(&windowManager.windowsToCloseEvent, false, true /* maybe already set */);
+ KEventSet(&windowManager.windowsToCloseEvent, true /* maybe already set */);
} else if (previous == 1) {
window->Destroy();
}
@ -321,7 +321,7 @@ void CloseHandleToObject(void *object, KernelObjectType type, uint32_t flags) {
if (!pipe->readers) {
// If there are no more readers, wake up any blocking writers.
- KEventSet(&pipe->canWrite, false, true);
+ KEventSet(&pipe->canWrite, true);
}
}
@ -330,7 +330,7 @@ void CloseHandleToObject(void *object, KernelObjectType type, uint32_t flags) {
if (!pipe->writers) {
// If there are no more writers, wake up any blocking readers.
- KEventSet(&pipe->canRead, false, true);
+ KEventSet(&pipe->canRead, true);
}
}
@ -621,7 +621,7 @@ size_t Pipe::Access(void *_buffer, size_t bytes, bool write, bool user) {
_buffer = (uint8_t *) _buffer + toWrite;
amount += toWrite;
- KEventSet(&canRead, false, true);
+ KEventSet(&canRead, true);
if (!readers) {
// EsPrint("\tPipe closed\n");
@ -654,7 +654,7 @@ size_t Pipe::Access(void *_buffer, size_t bytes, bool write, bool user) {
_buffer = (uint8_t *) _buffer + toRead;
amount += toRead;
- KEventSet(&canWrite, false, true);
+ KEventSet(&canWrite, true);
if (!writers) {
// Nobody is writing to the pipe, so there's no point reading from it.
@ -724,7 +724,7 @@ bool MessageQueue::SendMessage(_EsMessageWithObject *_message) {
}
}
- KEventSet(&notEmpty, false, true);
+ KEventSet(&notEmpty, true);
return true;
}

View File

@ -520,14 +520,10 @@ namespace POSIX {
}
KSpinlockAcquire(&scheduler.dispatchSpinlock);
process->posixForking = false;
- if (process->allThreadsTerminated) {
- KEventSet(&process->killedEvent, true);
- }
+ bool setKilledEvent = process->allThreadsTerminated;
KSpinlockRelease(&scheduler.dispatchSpinlock);
+ if (setKilledEvent) KEventSet(&process->killedEvent, true);
EsHeapFree(path, 0, K_FIXED);
CloseHandleToObject(process->executableMainThread, KERNEL_OBJECT_THREAD);
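The fork path above shows the pattern this commit applies wherever KEventSet used to be called with the dispatch spinlock held: sample the condition under the lock, release the lock, then set the event. A condensed sketch of that pattern, wrapped in a hypothetical helper name (FinishPosixFork) purely for illustration:

	void FinishPosixFork(Process *process) {
		KSpinlockAcquire(&scheduler.dispatchSpinlock);
		process->posixForking = false;
		bool setKilledEvent = process->allThreadsTerminated; // Latched while the lock is held.
		KSpinlockRelease(&scheduler.dispatchSpinlock);

		// KEventSet now acquires dispatchSpinlock itself, so it must not be
		// called while that lock is still held.
		if (setKilledEvent) KEventSet(&process->killedEvent, true /* maybe already set */);
	}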

View File

@ -1,3 +1,7 @@
+ // TODO Review vforking interaction from the POSIX subsystem with the process termination algorithm.
+ // TODO Simplify or remove asynchronous task thread semantics.
+ // TODO Break up or remove dispatchSpinlock.
#ifndef IMPLEMENTATION
#define THREAD_PRIORITY_NORMAL (0) // Lower value = higher priority.
@ -523,9 +527,7 @@ void KillProcess(Process *process) {
KMutexRelease(&scheduler.allProcessesMutex);
KSpinlockAcquire(&scheduler.dispatchSpinlock);
process->allThreadsTerminated = true;
bool setProcessKilledEvent = true;
#ifdef ENABLE_POSIX_SUBSYSTEM
@ -537,13 +539,13 @@ void KillProcess(Process *process) {
}
#endif
+ KSpinlockRelease(&scheduler.dispatchSpinlock);
if (setProcessKilledEvent) {
// We can now also set the killed event on the process.
KEventSet(&process->killedEvent, true);
}
- KSpinlockRelease(&scheduler.dispatchSpinlock);
// There are no threads left in this process.
// We should destroy the handle table at this point.
// Otherwise, the process might never be freed
@ -1152,6 +1154,36 @@ void Scheduler::Yield(InterruptContext *context) {
return;
}
+ if (!local->processorID) {
+ // Update the scheduler's time.
+ timeMs = ArchGetTimeMs();
+ globalData->schedulerTimeMs = timeMs;
+ // Notify the necessary timers.
+ KSpinlockAcquire(&activeTimersSpinlock);
+ LinkedItem<KTimer> *_timer = activeTimers.firstItem;
+ while (_timer) {
+ KTimer *timer = _timer->thisItem;
+ LinkedItem<KTimer> *next = _timer->nextItem;
+ if (timer->triggerTimeMs <= timeMs) {
+ activeTimers.Remove(_timer);
+ KEventSet(&timer->event);
+ if (timer->callback) {
+ KRegisterAsyncTask(&timer->asyncTask, timer->callback);
+ }
+ } else {
+ break; // Timers are kept sorted, so there's no point continuing.
+ }
+ _timer = next;
+ }
+ KSpinlockRelease(&activeTimersSpinlock);
+ }
if (local->spinlockCount) {
KernelPanic("Scheduler::Yield - Spinlocks acquired while attempting to yield.\n");
}
@ -1238,36 +1270,6 @@ void Scheduler::Yield(InterruptContext *context) {
}
}
- if (!local->processorID) {
- // Update the scheduler's time.
- timeMs = ArchGetTimeMs();
- globalData->schedulerTimeMs = timeMs;
- // Notify the necessary timers.
- KSpinlockAcquire(&activeTimersSpinlock);
- LinkedItem<KTimer> *_timer = activeTimers.firstItem;
- while (_timer) {
- KTimer *timer = _timer->thisItem;
- LinkedItem<KTimer> *next = _timer->nextItem;
- if (timer->triggerTimeMs <= timeMs) {
- activeTimers.Remove(_timer);
- KEventSet(&timer->event, true /* scheduler already locked */);
- if (timer->callback) {
- KRegisterAsyncTask(&timer->asyncTask, timer->callback);
- }
- } else {
- break; // Timers are kept sorted, so there's no point continuing.
- }
- _timer = next;
- }
- KSpinlockRelease(&activeTimersSpinlock);
- }
// Get the next thread to execute.
Thread *newThread = local->currentThread = PickThread(local);
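In Scheduler::Yield the timer pass moves earlier, out of the region where the dispatch spinlock is held, so that the simplified KEventSet can acquire scheduler.dispatchSpinlock itself. A minimal sketch of the relocated pass, wrapped in a hypothetical helper (NotifyExpiredTimers) for illustration; in the diff the code is inline in Yield:

	void NotifyExpiredTimers(uint64_t timeMs) {
		KSpinlockAcquire(&activeTimersSpinlock);
		LinkedItem<KTimer> *item = activeTimers.firstItem;

		while (item) {
			KTimer *timer = item->thisItem;
			LinkedItem<KTimer> *next = item->nextItem;
			if (timer->triggerTimeMs > timeMs) break; // Timers are kept sorted by trigger time.
			activeTimers.Remove(item);
			KEventSet(&timer->event); // No "scheduler already locked" argument any more.
			if (timer->callback) KRegisterAsyncTask(&timer->asyncTask, timer->callback);
			item = next;
		}

		KSpinlockRelease(&activeTimersSpinlock);
	}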

View File

@ -306,17 +306,12 @@ void KSemaphoreSet(KSemaphore *semaphore, uintptr_t u) {
KMutexRelease(&semaphore->mutex);
}
- bool KEventSet(KEvent *event, bool schedulerAlreadyLocked, bool maybeAlreadySet) {
+ bool KEventSet(KEvent *event, bool maybeAlreadySet) {
if (event->state && !maybeAlreadySet) {
- KernelLog(LOG_ERROR, "Synchronisation", "event already set", "KEvent::Set - Attempt to set a event that had already been set\n");
+ KernelLog(LOG_ERROR, "Synchronisation", "event already set", "KEventSet - Attempt to set a event that had already been set\n");
}
- if (!schedulerAlreadyLocked) {
- KSpinlockAcquire(&scheduler.dispatchSpinlock);
- } else {
- KSpinlockAssertLocked(&scheduler.dispatchSpinlock);
- }
+ KSpinlockAcquire(&scheduler.dispatchSpinlock);
volatile bool unblockedThreads = false;
if (!event->state) {
@ -327,14 +322,12 @@ bool KEventSet(KEvent *event, bool schedulerAlreadyLocked, bool maybeAlreadySet)
unblockedThreads = true;
}
- scheduler.NotifyObject(&event->blockedThreads, !event->autoReset /* if this is a manually reset event, unblock all the waiting threads */);
+ // If this is a manually reset event, unblock all the waiting threads.
+ scheduler.NotifyObject(&event->blockedThreads, !event->autoReset);
}
}
- if (!schedulerAlreadyLocked) {
- KSpinlockRelease(&scheduler.dispatchSpinlock);
- }
+ KSpinlockRelease(&scheduler.dispatchSpinlock);
return unblockedThreads;
}
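With the simplified implementation above, a typical wait/set pairing looks like the following sketch; the producer/consumer functions, the one-second timeout, and the assumption that a zero-initialised KEvent is manually reset (autoReset false) are illustrative and not part of the diff:

	KEvent workAvailable = {}; // Assumed manually reset: autoReset is false after zero-initialisation.

	void ProducerSide() {
		// maybeAlreadySet = true suppresses the "event already set" log when the
		// previous set might not have been consumed yet.
		KEventSet(&workAvailable, true /* maybe already set */);
	}

	void ConsumerSide() {
		if (KEventWait(&workAvailable, 1000 /* timeout in ms */)) {
			KEventReset(&workAvailable);
			// Handle the work here.
		} else {
			// KEventWait returns false if the wait timed out.
		}
	}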

View File

@ -767,7 +767,7 @@ SYSCALL_IMPLEMENT(ES_SYSCALL_FILE_RESIZE) {
SYSCALL_IMPLEMENT(ES_SYSCALL_EVENT_SET) {
SYSCALL_HANDLE(argument0, KERNEL_OBJECT_EVENT, event, KEvent);
- KEventSet(event, false, true);
+ KEventSet(event, true);
SYSCALL_RETURN(ES_SUCCESS, false);
}
@ -1220,7 +1220,7 @@ SYSCALL_IMPLEMENT(ES_SYSCALL_PROCESS_GET_STATE) {
SYSCALL_IMPLEMENT(ES_SYSCALL_SHUTDOWN) {
SYSCALL_PERMISSION(ES_PERMISSION_SHUTDOWN);
shutdownAction = argument0;
- KEventSet(&shutdownEvent, false, true);
+ KEventSet(&shutdownEvent, true);
SYSCALL_RETURN(ES_SUCCESS, false);
}