mirror of https://gitlab.com/nakst/essence
simplify async tasks
parent f7894f74a6
commit ee4a0e7a05
@@ -14,10 +14,12 @@ struct ACPIThermalZone : KDevice {
uint64_t pollingFrequency; // Recommended polling frequency of temperature, in tenths of a seconds.
uint64_t currentTemperature;
KMutex refreshMutex;
KAsyncTask refreshTemperatureAsyncTask;
KAsyncTask refreshThresholdsAsyncTask;
};

static void ACPIThermalRefreshTemperature(EsGeneric context) {
ACPIThermalZone *device = (ACPIThermalZone *) context.p;
static void ACPIThermalRefreshTemperature(KAsyncTask *task) {
ACPIThermalZone *device = EsContainerOf(ACPIThermalZone, refreshTemperatureAsyncTask, task);
KACPIObject *object = device->object;
KMutexAcquire(&device->refreshMutex);
KernelLog(LOG_INFO, "ACPIThermal", "temperature", "Taking temperature reading...\n");
@@ -31,8 +33,8 @@ static void ACPIThermalRefreshTemperature(EsGeneric context) {
KMutexRelease(&device->refreshMutex);
}

static void ACPIThermalRefreshThresholds(EsGeneric context) {
ACPIThermalZone *device = (ACPIThermalZone *) context.p;
static void ACPIThermalRefreshThresholds(KAsyncTask *task) {
ACPIThermalZone *device = EsContainerOf(ACPIThermalZone, refreshThresholdsAsyncTask, task);
KACPIObject *object = device->object;
KMutexAcquire(&device->refreshMutex);
KernelLog(LOG_INFO, "ACPIThermal", "threshold", "Taking threshold readings...\n");
@@ -60,16 +62,16 @@ static void ACPIThermalRefreshThresholds(EsGeneric context) {
}

KMutexRelease(&device->refreshMutex);
ACPIThermalRefreshTemperature(device);
ACPIThermalRefreshTemperature(&device->refreshTemperatureAsyncTask);
}

static void ACPIThermalDeviceNotificationHandler(KACPIObject *, uint32_t value, EsGeneric context) {
ACPIThermalZone *device = (ACPIThermalZone *) context.p;

if (value == 0x80) {
KRegisterAsyncTask(ACPIThermalRefreshTemperature, device);
KRegisterAsyncTask(&device->refreshTemperatureAsyncTask, ACPIThermalRefreshTemperature);
} else if (value == 0x81) {
KRegisterAsyncTask(ACPIThermalRefreshThresholds, device);
KRegisterAsyncTask(&device->refreshThresholdsAsyncTask, ACPIThermalRefreshThresholds);
}
}

@@ -80,7 +82,7 @@ static void ACPIThermalDeviceAttach(KDevice *parent) {
device->object = object;
KernelLog(LOG_INFO, "ACPIThermal", "device attached", "Found ACPI thermal zone.\n");

ACPIThermalRefreshThresholds(device);
ACPIThermalRefreshThresholds(&device->refreshThresholdsAsyncTask);

EsError error;

@@ -20,6 +20,7 @@ uint8_t acpicaPageBuffer[K_PAGE_SIZE];
KMutex acpicaPageBufferMutex;
char acpiPrintf[4096];
bool acpiOSLayerActive = false;
KAsyncTask powerButtonAsyncTask;

ES_EXTERN_C ACPI_STATUS AcpiOsInitialize() {
if (acpiOSLayerActive) KernelPanic("AcpiOsInitialize - ACPI has already been initialised.\n");
@@ -419,11 +420,11 @@ ES_EXTERN_C ACPI_STATUS AcpiOsEnterSleep(UINT8 sleepState, UINT32 registerAValue
}

UINT32 ACPIPowerButtonPressed(void *) {
KRegisterAsyncTask([] (EsGeneric) {
KRegisterAsyncTask(&powerButtonAsyncTask, [] (KAsyncTask *) {
_EsMessageWithObject m = { nullptr, ES_MSG_POWER_BUTTON_PRESSED };
if (scheduler.shutdown) return;
if (desktopProcess) desktopProcess->messageQueue.SendMessage(&m);
}, nullptr, false);
});

return 0;
}

@@ -410,8 +410,8 @@ bool AHCIController::HandleIRQ() {
return true;
}

void TimeoutTimerHit(EsGeneric argument) {
AHCIController *controller = (AHCIController *) argument.p;
void TimeoutTimerHit(KAsyncTask *task) {
AHCIController *controller = EsContainerOf(AHCIController, timeoutTimer.asyncTask, task);

uint64_t currentTimeStamp = KGetTimeInMs();

@@ -4,6 +4,8 @@
#include <arch/x86_pc.h>

struct PS2Update {
KAsyncTask task;

union {
struct {
volatile int xMovement, yMovement, zMovement;
@@ -193,8 +195,8 @@ uint16_t scancodeConversionTable2[] = {
#define PS2_MOUSE_READ (0xEB)
#define PS2_MOUSE_RESOLUTION (0xE8)

void PS2MouseUpdated(EsGeneric _update) {
PS2Update *update = (PS2Update *) _update.p;
void PS2MouseUpdated(KAsyncTask *task) {
PS2Update *update = EsContainerOf(PS2Update, task, task);

KMouseUpdateData data = {
.xMovement = update->xMovement * K_CURSOR_MOVEMENT_SCALE,
@@ -206,8 +208,8 @@ void PS2MouseUpdated(EsGeneric _update) {
KMouseUpdate(&data);
}

void PS2KeyboardUpdated(EsGeneric _update) {
PS2Update *update = (PS2Update *) _update.p;
void PS2KeyboardUpdated(KAsyncTask *task) {
PS2Update *update = EsContainerOf(PS2Update, task, task);
KernelLog(LOG_VERBOSE, "PS/2", "keyboard update", "Received scancode %x.\n", update->scancode);
KKeyPress(update->scancode);
}
@@ -349,7 +351,7 @@ bool PS2IRQHandler(uintptr_t interruptIndex, void *) {
| ((firstByte & (1 << 2)) ? K_MIDDLE_BUTTON : 0);
update->zMovement = -((int8_t) fourthByte);

KRegisterAsyncTask(PS2MouseUpdated, update, false);
KRegisterAsyncTask(&update->task, PS2MouseUpdated);

firstByte = 0;
secondByte = 0;
@@ -366,7 +368,7 @@ bool PS2IRQHandler(uintptr_t interruptIndex, void *) {
ps2.lastUpdatesIndex = (ps2.lastUpdatesIndex + 1) % 16;
KSpinlockRelease(&ps2.lastUpdatesLock);
update->scancode = scancode;
KRegisterAsyncTask(PS2KeyboardUpdated, update, false);
KRegisterAsyncTask(&update->task, PS2KeyboardUpdated);
}
} else {
KernelPanic("PS2IRQHandler - Incorrect interrupt index.\n", interruptIndex);

@@ -99,6 +99,7 @@ struct XHCIEndpoint {

KUSBTransferCallback callback;
EsGeneric context;
KAsyncTask callbackAsyncTask;

bool CreateTransferRing();

@@ -645,12 +646,12 @@ bool XHCIController::HandleIRQ() {
}

if (endpoint->callback) {
KRegisterAsyncTask([] (EsGeneric argument) {
XHCIEndpoint *endpoint = (XHCIEndpoint *) argument.p;
KRegisterAsyncTask(&endpoint->callbackAsyncTask, [] (KAsyncTask *task) {
XHCIEndpoint *endpoint = EsContainerOf(XHCIEndpoint, callbackAsyncTask, task);
KUSBTransferCallback callback = endpoint->callback;
endpoint->callback = nullptr;
callback((endpoint->lastStatus >> 24) != 1 ? -1 : endpoint->lastStatus & 0xFFFFFF, endpoint->context);
}, endpoint, true);
});
}

break;

@@ -70,30 +70,19 @@

#include ARCH_KERNEL_SOURCE

struct AsyncTask {
KAsyncTaskCallback callback;
void *argument;
struct MMSpace *addressSpace;
};

struct CPULocalStorage {
struct Thread *currentThread,
*idleThread,
*asyncTaskThread;

struct InterruptContext *panicContext;

bool irqSwitchThread, schedulerReady, inIRQ;

unsigned processorID;
size_t spinlockCount;

struct ArchCPU *archCPU;

// TODO Have separate interrupt task threads and system worker threads (with no task limit).
#define MAX_ASYNC_TASKS (256)
volatile AsyncTask asyncTasks[MAX_ASYNC_TASKS];
volatile uint8_t asyncTasksRead, asyncTasksWrite;
struct Thread *currentThread; // The currently executing thread on this CPU.
struct Thread *idleThread; // The CPU's idle thread.
struct Thread *asyncTaskThread; // The CPU's async task thread, used to process the asyncTaskList.
struct InterruptContext *panicContext; // The interrupt context saved from a kernel panic IPI.
bool irqSwitchThread; // The CPU should call Scheduler::Yield after the IRQ handler exits.
bool schedulerReady; // The CPU is ready to execute threads from the pre-emptive scheduler.
bool inIRQ; // The CPU is currently executing an IRQ handler registered with KRegisterIRQ.
bool inAsyncTask; // The CPU is currently executing an asynchronous task.
uint32_t processorID; // The scheduler's ID for the process.
size_t spinlockCount; // The number of spinlocks currently acquired.
struct ArchCPU *archCPU; // The architecture layer's data for the CPU.
SimpleList asyncTaskList; // The list of AsyncTasks to be processed.
};

struct PhysicalMemoryRegion {

@@ -82,6 +82,8 @@ struct MMSpace {
bool user; // Regions in the space may be accessed from userspace.
uint64_t commit; // An *approximate* commit in pages. TODO Better memory usage tracking.
uint64_t reserve; // The number of reserved pages.

KAsyncTask removeAsyncTask; // The asynchronous task for deallocating the memory space once it's no longer in use.
};

// A physical page of memory.
@@ -2252,11 +2254,11 @@ void MMSpaceCloseReference(MMSpace *space) {
return;
}

KRegisterAsyncTask([] (EsGeneric _space) {
MMSpace *space = (MMSpace *) _space.p;
KRegisterAsyncTask(&space->removeAsyncTask, [] (KAsyncTask *task) {
MMSpace *space = EsContainerOf(MMSpace, removeAsyncTask, task);
MMArchFinalizeVAS(space);
scheduler.mmSpacePool.Remove(space);
}, space);
});
}

void MMInitialise() {

@@ -102,18 +102,6 @@ struct KMSIInformation {
KMSIInformation KRegisterMSI(KIRQHandler handler, void *context, const char *cOwnerName);
void KUnregisterMSI(uintptr_t tag);

// ---------------------------------------------------------------------------------------------------------------
// Async tasks.
// ---------------------------------------------------------------------------------------------------------------

// Async tasks are executed on the same processor that registered it.
// They can be registered with interrupts disabled (e.g. in IRQ handlers).
// They are executed in the order they were registered.
// They can acquire mutexes, but cannot perform IO.

typedef void (*KAsyncTaskCallback)(EsGeneric argument);
void KRegisterAsyncTask(KAsyncTaskCallback callback, EsGeneric argument, bool needed = true);

// ---------------------------------------------------------------------------------------------------------------
// Common data types, algorithms and things.
// ---------------------------------------------------------------------------------------------------------------
@@ -145,6 +133,24 @@ extern "C" uint16_t ProcessorIn16(uint16_t port);
extern "C" void ProcessorOut32(uint16_t port, uint32_t value);
extern "C" uint32_t ProcessorIn32(uint16_t port);

// ---------------------------------------------------------------------------------------------------------------
// Async tasks.
// ---------------------------------------------------------------------------------------------------------------

// Async tasks are executed on the same processor that registered it.
// They can be registered with interrupts disabled (e.g. in IRQ handlers).
// They are executed in the order they were registered.
// They can acquire mutexes, but cannot perform IO.

typedef void (*KAsyncTaskCallback)(struct KAsyncTask *task);

struct KAsyncTask {
SimpleList item;
KAsyncTaskCallback callback;
};

void KRegisterAsyncTask(KAsyncTask *task, KAsyncTaskCallback callback);
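
A minimal usage sketch of this new intrusive interface, following the pattern of the drivers updated above; the ExampleDevice type and its members are hypothetical, introduced only for illustration and not part of the commit:

struct ExampleDevice : KDevice {
	KAsyncTask refreshAsyncTask; // Embedded in the structure that owns the deferred work.
};

static void ExampleRefresh(KAsyncTask *task) {
	// Recover the owning structure from the embedded task.
	ExampleDevice *device = EsContainerOf(ExampleDevice, refreshAsyncTask, task);
	(void) device; // ...perform the deferred work here...
}

static void ExampleNotification(ExampleDevice *device) {
	// Registration is allowed with interrupts disabled, e.g. from an IRQ or notification handler.
	KRegisterAsyncTask(&device->refreshAsyncTask, ExampleRefresh);
}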

// ---------------------------------------------------------------------------------------------------------------
// Kernel core.
// ---------------------------------------------------------------------------------------------------------------
@@ -313,6 +319,7 @@ void KSemaphoreSet(KSemaphore *semaphore, uintptr_t units = 1);

struct KTimer {
KEvent event;
KAsyncTask asyncTask;
K_PRIVATE
LinkedItem<KTimer> item;
uint64_t triggerTimeMs;

@@ -5,11 +5,6 @@
#define THREAD_PRIORITY_COUNT (2)

void CloseHandleToProcess(void *_thread);
void KillThread(EsGeneric _thread);

void RegisterAsyncTask(KAsyncTaskCallback callback, EsGeneric argument, struct Process *targetProcess,
bool needed /* if false, the task may not be registered if there are many queued tasks */,
bool unlocked = false /* set to true if you haven't acquired the scheduler's lock */);

enum ThreadState : int8_t {
THREAD_ACTIVE, // An active thread. Not necessarily executing; `executing` determines if it executing.
@@ -88,6 +83,7 @@ struct Thread {
} blocking;

KEvent killedEvent;
KAsyncTask killAsyncTask;

// If the type of the thread is THREAD_ASYNC_TASK,
// then this is the virtual address space that should be loaded
@@ -142,6 +138,7 @@ struct Process {
bool terminating; // This never gets set if TerminateProcess is not called, and instead the process is killed because all its threads exit naturally.
int exitStatus; // TODO Remove this.
KEvent killedEvent;
KAsyncTask removeAsyncTask;

// Executable state:
uint8_t executableState;
@@ -266,14 +263,23 @@ struct Scheduler {
Process _kernelProcess;
Process *kernelProcess = &_kernelProcess;
Process *desktopProcess;

extern Scheduler scheduler;
Scheduler scheduler;
KSpinlock asyncTaskSpinlock;

#endif

#ifdef IMPLEMENTATION

Scheduler scheduler;
void KRegisterAsyncTask(KAsyncTask *task, KAsyncTaskCallback callback) {
KSpinlockAcquire(&asyncTaskSpinlock);

if (!task->callback) {
task->callback = callback;
GetLocalStorage()->asyncTaskList.Insert(&task->item, false);
}

KSpinlockRelease(&asyncTaskSpinlock);
}
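
A note on the design, as an observation from the code above rather than from the commit message: the task is only queued while task->callback is null, so registering a task that is already pending is a harmless no-op, which lets callers such as the ACPI notification handler re-register the same embedded task repeatedly without double-queuing. A hypothetical illustration, reusing the ACPI thermal device from the first hunk:

// The first call queues the task; the second is ignored because the callback is still pending.
KRegisterAsyncTask(&device->refreshTemperatureAsyncTask, ACPIThermalRefreshTemperature);
KRegisterAsyncTask(&device->refreshTemperatureAsyncTask, ACPIThermalRefreshTemperature);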

int8_t Scheduler::GetThreadEffectivePriority(Thread *thread) {
KSpinlockAssertLocked(&lock);
@@ -477,6 +483,108 @@ Thread *Scheduler::SpawnThread(const char *cName, uintptr_t startAddress, uintpt
return nullptr;
}

void _RemoveProcess(KAsyncTask *task) {
Process *process = EsContainerOf(Process, removeAsyncTask, task);
GetCurrentThread()->SetAddressSpace(process->vmm);
scheduler.RemoveProcess(process);
}

void CloseHandleToProcess(void *_process) {
KSpinlockAcquire(&scheduler.lock);
Process *process = (Process *) _process;
if (!process->handles) KernelPanic("CloseHandleToProcess - All handles to the process have been closed.\n");
process->handles--;
bool removeProcess = !process->handles;

KernelLog(LOG_VERBOSE, "Scheduler", "close process handle", "Closed handle to process %d; %d handles remain.\n", process->id, process->handles);

if (removeProcess && process->executableStartRequest) {
// This must be done in the correct virtual address space!
KRegisterAsyncTask(&process->removeAsyncTask, _RemoveProcess);
}

KSpinlockRelease(&scheduler.lock);

if (removeProcess && !process->executableStartRequest) {
// The process was never started, so we can't make a RemoveProcess task, because it doesn't have an MMSpace yet.
scheduler.RemoveProcess(process);
}

ProcessorFakeTimerInterrupt(); // Process the asynchronous task.
}

void KillProcess(Process *process) {
KernelLog(LOG_INFO, "Scheduler", "killing process", "Killing process (%d) %x...\n", process->id, process);

process->allThreadsTerminated = true;
scheduler.activeProcessCount--;

bool setProcessKilledEvent = true;

#ifdef ENABLE_POSIX_SUBSYSTEM
if (process->posixForking) {
// If the process is from an incomplete vfork(),
// then the parent process gets to set the killed event
// and the exit status.
setProcessKilledEvent = false;
}
#endif

if (setProcessKilledEvent) {
// We can now also set the killed event on the process.
KEventSet(&process->killedEvent, true);
}

KSpinlockRelease(&scheduler.lock);

// There are no threads left in this process.
// We should destroy the handle table at this point.
// Otherwise, the process might never be freed
// because of a cyclic-dependency.
process->handleTable.Destroy();

// Destroy the virtual memory space.
// Don't actually deallocate it yet though; that is done on an async task queued by RemoveProcess.
// This must be destroyed after the handle table!
MMSpaceDestroy(process->vmm);

// Tell Desktop the process has terminated.
if (!scheduler.shutdown) {
_EsMessageWithObject m;
EsMemoryZero(&m, sizeof(m));
m.message.type = ES_MSG_PROCESS_TERMINATED;
m.message.crash.pid = process->id;
desktopProcess->messageQueue.SendMessage(&m);
}
}

void KillThread(KAsyncTask *task) {
Thread *thread = EsContainerOf(Thread, killAsyncTask, task);
GetCurrentThread()->SetAddressSpace(thread->process->vmm);

KSpinlockAcquire(&scheduler.lock);
scheduler.allThreads.Remove(&thread->allItem);
thread->process->threads.Remove(&thread->processItem);

KernelLog(LOG_INFO, "Scheduler", "killing thread",
"Killing thread (ID %d, %d remain in process %d) %x...\n", thread->id, thread->process->threads.count, thread->process->id, thread);

if (thread->process->threads.count == 0) {
KillProcess(thread->process); // Releases the scheduler's lock.
} else {
KSpinlockRelease(&scheduler.lock);
}

MMFree(kernelMMSpace, (void *) thread->kernelStackBase);
if (thread->userStackBase) MMFree(thread->process->vmm, (void *) thread->userStackBase);

KEventSet(&thread->killedEvent);

// Close the handle that this thread owns of its owner process, and the handle it owns of itself.
CloseHandleToObject(thread->process, KERNEL_OBJECT_PROCESS);
CloseHandleToObject(thread, KERNEL_OBJECT_THREAD);
}

void Scheduler::TerminateProcess(Process *process, int status) {
KSpinlockAcquire(&scheduler.lock);
@@ -573,7 +681,7 @@ void Scheduler::TerminateThread(Thread *thread, bool terminatingProcess) {
// The thread is terminatable and it isn't executing.
// Remove it from its queue, and then remove the thread.
thread->item.RemoveFromList();
RegisterAsyncTask(KillThread, thread, thread->process, true);
KRegisterAsyncTask(&thread->killAsyncTask, KillThread);
yield = true;
}
} else if (thread->terminatableState == THREAD_USER_BLOCK_REQUEST) {
@@ -823,14 +931,20 @@ void AsyncTaskThread() {
CPULocalStorage *local = GetLocalStorage();

while (true) {
if (local->asyncTasksRead == local->asyncTasksWrite) {
if (!local->asyncTaskList.first) {
ProcessorFakeTimerInterrupt();
} else {
volatile AsyncTask *task = local->asyncTasks + local->asyncTasksRead;
if (task->addressSpace) local->currentThread->SetAddressSpace(task->addressSpace);
task->callback(task->argument);
local->currentThread->SetAddressSpace(nullptr);
local->asyncTasksRead++;
KSpinlockAcquire(&asyncTaskSpinlock);
SimpleList *item = local->asyncTaskList.first;
KAsyncTask *task = EsContainerOf(KAsyncTask, item, item);
KAsyncTaskCallback callback = task->callback;
task->callback = nullptr;
local->inAsyncTask = true;
item->Remove();
KSpinlockRelease(&asyncTaskSpinlock);
callback(task); // This may cause the task to be deallocated.
local->currentThread->SetAddressSpace(nullptr); // The task may have modified the address space.
local->inAsyncTask = false;
}
}
}
@@ -862,46 +976,6 @@ void Scheduler::CreateProcessorThreads(CPULocalStorage *local) {
local->asyncTaskThread->type = THREAD_ASYNC_TASK;
}

void RegisterAsyncTask(KAsyncTaskCallback callback, EsGeneric argument, Process *targetProcess, bool needed, bool unlocked) {
if (!unlocked) KSpinlockAssertLocked(&scheduler.lock);
else KSpinlockAcquire(&scheduler.lock);
EsDefer(if (unlocked) KSpinlockRelease(&scheduler.lock));

if (targetProcess == nullptr) {
targetProcess = kernelProcess;
}

CPULocalStorage *local = GetLocalStorage();

int difference = local->asyncTasksWrite - local->asyncTasksRead;
if (difference < 0) difference += MAX_ASYNC_TASKS;

if (difference >= MAX_ASYNC_TASKS / 2 && !needed) {
return;
}

if (difference == MAX_ASYNC_TASKS - 1) {
KernelPanic("RegisterAsyncTask - Maximum number of queued asynchronous tasks reached.\n");
}

// We need to register tasks for terminating processes.
#if 0
if (!targetProcess->handles) {
KernelPanic("RegisterAsyncTask - Process has no handles.\n");
}
#endif

volatile AsyncTask *task = local->asyncTasks + local->asyncTasksWrite;
task->callback = callback;
task->argument = argument.p;
task->addressSpace = targetProcess->vmm;
local->asyncTasksWrite++;
}

void KRegisterAsyncTask(KAsyncTaskCallback callback, EsGeneric argument, bool needed) {
RegisterAsyncTask(callback, argument, kernelProcess, needed, true);
}

void Scheduler::RemoveProcess(Process *process) {
KernelLog(LOG_INFO, "Scheduler", "remove process", "Removing process %d.\n", process->id);
@@ -948,11 +1022,8 @@ void Scheduler::RemoveProcess(Process *process) {

// Free the process.

KRegisterAsyncTask([] (EsGeneric _process) {
Process *process = (Process *) _process.p;
MMSpaceCloseReference(process->vmm);
scheduler.processPool.Remove(process);
}, process);
MMSpaceCloseReference(process->vmm);
scheduler.processPool.Remove(process);

if (started) {
// If all processes (except the kernel process) have terminated, set the scheduler's killedEvent.
@@ -1106,110 +1177,10 @@ void Scheduler::PauseProcess(Process *process, bool resume) {
}
}

void _RemoveProcess(EsGeneric process) {
scheduler.RemoveProcess((Process *) process.p);
}

void CloseHandleToProcess(void *_process) {
KSpinlockAcquire(&scheduler.lock);
Process *process = (Process *) _process;
if (!process->handles) KernelPanic("CloseHandleToProcess - All handles to the process have been closed.\n");
process->handles--;
bool removeProcess = !process->handles;

KernelLog(LOG_VERBOSE, "Scheduler", "close process handle", "Closed handle to process %d; %d handles remain.\n", process->id, process->handles);

if (removeProcess && process->executableStartRequest) {
// This must be done in the correct virtual address space!
RegisterAsyncTask(_RemoveProcess, process, process, true);
}

KSpinlockRelease(&scheduler.lock);

if (removeProcess && !process->executableStartRequest) {
// The process was never started, so we can't make a RemoveProcess task, because it doesn't have an MMSpace yet.
scheduler.RemoveProcess(process);
}

ProcessorFakeTimerInterrupt(); // Process the asynchronous task.
}

void KillProcess(Process *process) {
KernelLog(LOG_INFO, "Scheduler", "killing process", "Killing process (%d) %x...\n", process->id, process);

process->allThreadsTerminated = true;
scheduler.activeProcessCount--;

bool setProcessKilledEvent = true;

#ifdef ENABLE_POSIX_SUBSYSTEM
if (process->posixForking) {
// If the process is from an incomplete vfork(),
// then the parent process gets to set the killed event
// and the exit status.
setProcessKilledEvent = false;
}
#endif

if (setProcessKilledEvent) {
// We can now also set the killed event on the process.
KEventSet(&process->killedEvent, true);
}

KSpinlockRelease(&scheduler.lock);

// There are no threads left in this process.
// We should destroy the handle table at this point.
// Otherwise, the process might never be freed
// because of a cyclic-dependency.
process->handleTable.Destroy();

// Destroy the virtual memory space.
// Don't actually deallocate it yet though; that is done on an async task queued by RemoveProcess.
// This must be destroyed after the handle table!
MMSpaceDestroy(process->vmm);

// Tell Desktop the process has terminated.
if (!scheduler.shutdown) {
_EsMessageWithObject m;
EsMemoryZero(&m, sizeof(m));
m.message.type = ES_MSG_PROCESS_TERMINATED;
m.message.crash.pid = process->id;
desktopProcess->messageQueue.SendMessage(&m);
}
}

void KillThread(EsGeneric _thread) {
Thread *thread = (Thread *) _thread.p;

KSpinlockAcquire(&scheduler.lock);
scheduler.allThreads.Remove(&thread->allItem);
thread->process->threads.Remove(&thread->processItem);

KernelLog(LOG_INFO, "Scheduler", "killing thread",
"Killing thread (ID %d, %d remain in process %d) %x...\n", thread->id, thread->process->threads.count, thread->process->id, _thread);

if (thread->process->threads.count == 0) {
KillProcess(thread->process); // Releases the scheduler's lock.
} else {
KSpinlockRelease(&scheduler.lock);
}

MMFree(kernelMMSpace, (void *) thread->kernelStackBase);
if (thread->userStackBase) MMFree(thread->process->vmm, (void *) thread->userStackBase);

KEventSet(&thread->killedEvent);

// Close the handle that this thread owns of its owner process, and the handle it owns of itself.
CloseHandleToObject(thread->process, KERNEL_OBJECT_PROCESS);
CloseHandleToObject(thread, KERNEL_OBJECT_THREAD);
}

Thread *Scheduler::PickThread(CPULocalStorage *local) {
KSpinlockAssertLocked(&lock);

if (local->asyncTasksRead != local->asyncTasksWrite
&& local->asyncTaskThread->state == THREAD_ACTIVE) {
if ((local->asyncTaskList.first || local->inAsyncTask) && local->asyncTaskThread->state == THREAD_ACTIVE) {
// If the asynchronous task thread for this processor isn't blocked, and has tasks to process, execute it.
return local->asyncTaskThread;
}
@@ -1261,7 +1232,7 @@ void Scheduler::Yield(InterruptContext *context) {
if (killThread) {
local->currentThread->state = THREAD_TERMINATED;
KernelLog(LOG_INFO, "Scheduler", "terminate yielded thread", "Terminated yielded thread %x\n", local->currentThread);
RegisterAsyncTask(KillThread, local->currentThread, local->currentThread->process, true);
KRegisterAsyncTask(&local->currentThread->killAsyncTask, KillThread);
}

// If the thread is waiting for an object to be notified, put it in the relevant blockedThreads list.
@@ -1336,7 +1307,7 @@ void Scheduler::Yield(InterruptContext *context) {
KEventSet(&timer->event, true /* scheduler already locked */);

if (timer->callback) {
RegisterAsyncTask(timer->callback, timer->argument, nullptr, true);
KRegisterAsyncTask(&timer->asyncTask, timer->callback);
}
} else {
break; // Timers are kept sorted, so there's no point continuing.