fixes from stress testing

nakst 2021-10-21 17:15:16 +01:00
parent bb37d2ae47
commit 394c545939
14 changed files with 342 additions and 152 deletions

View File

@ -176,6 +176,13 @@ EsError Extract(const char *pathIn, size_t pathInBytes, const char *pathOut, siz
if (nameBytes > NAME_MAX - pathOutBytes) break;
if (!Decompress(e, e->pathBuffer + pathOutBytes, nameBytes)) break;
if (fileSize == (uint64_t) -1UL) {
EsPrint("Creating folder '%s'...\n", pathOutBytes + nameBytes, (const char *) e->pathBuffer);
EsPathCreate((const char *) e->pathBuffer, pathOutBytes + nameBytes, ES_NODE_DIRECTORY, true);
continue;
}
EsPrint("Copying file '%s' of size %D...\n", pathOutBytes + nameBytes, (const char *) e->pathBuffer, fileSize);
EsFileInformation fileOut = EsFileOpen((const char *) e->pathBuffer, pathOutBytes + nameBytes,
ES_FILE_WRITE | ES_NODE_CREATE_DIRECTORIES | ES_NODE_FAIL_IF_FOUND);
EsFileOffset fileOutPosition = 0;
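
Taken together with the writer in GatherFilesForInstallerArchive (in the build script hunks near the end of this commit), the extractor above implies a simple record layout for the installer archive. A minimal sketch, assuming the field order shown in both hunks and that records are packed back-to-back in the compressed stream; the struct name is hypothetical, and the real code reads each field individually through the decompressor:

struct InstallerArchiveRecordHeader { // Hypothetical; for illustration only.
    uint64_t fileSize;  // (uint64_t) -1 marks a directory entry carrying no data.
    uint16_t nameBytes; // Length of the relative path that follows, no terminator.
    // Followed by: char path[nameBytes]; then the file contents (files only).
};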

View File

@ -300,6 +300,11 @@ void UpdateDisplay(Instance *instance, int index) {
ADD_MEMORY_STATISTIC_DISPLAY("Commit fixed limit:", "%D (%d pages)", statistics.commitFixedLimit * ES_PAGE_SIZE, statistics.commitFixedLimit);
ADD_MEMORY_STATISTIC_DISPLAY("Commit remaining:", "%D (%d pages)", statistics.commitRemaining * ES_PAGE_SIZE, statistics.commitRemaining);
ADD_MEMORY_STATISTIC_DISPLAY("Zeroed frames:", "%D (%d pages)", statistics.countZeroedPages * ES_PAGE_SIZE, statistics.countZeroedPages);
ADD_MEMORY_STATISTIC_DISPLAY("Free frames:", "%D (%d pages)", statistics.countFreePages * ES_PAGE_SIZE, statistics.countFreePages);
ADD_MEMORY_STATISTIC_DISPLAY("Standby frames:", "%D (%d pages)", statistics.countStandbyPages * ES_PAGE_SIZE, statistics.countStandbyPages);
ADD_MEMORY_STATISTIC_DISPLAY("Active frames:", "%D (%d pages)", statistics.countActivePages * ES_PAGE_SIZE, statistics.countActivePages);
EsTimerSet(REFRESH_INTERVAL, [] (EsGeneric context) {
Instance *instance = (Instance *) context.p;

View File

@ -89,7 +89,7 @@ struct TaskBar : EsElement {
struct ContainerWindow {
WindowTabBand *tabBand;
TaskBarButton *taskBarButton;
TaskBarButton *taskBarButton; // Might be null. For example, in the installer there is no task bar.
EsWindow *window;
WindowTab *active;
};
@ -521,7 +521,7 @@ void WindowTabActivate(WindowTab *tab, bool force = false) {
if (tab->container->active != tab || force) {
tab->container->active = tab;
EsElementRelayout(tab->container->tabBand);
tab->container->taskBarButton->Repaint(true);
if (tab->container->taskBarButton) tab->container->taskBarButton->Repaint(true);
EsHandle handle = tab->notRespondingInstance ? tab->notRespondingInstance->embeddedWindowHandle : tab->applicationInstance->embeddedWindowHandle;
EsSyscall(ES_SYSCALL_WINDOW_SET_PROPERTY, tab->window->handle, handle, 0, ES_WINDOW_PROPERTY_EMBED);
}
@ -532,7 +532,7 @@ void WindowTabDestroy(WindowTab *tab) {
if (container->tabBand->items.Length() == 1) {
EsElementDestroy(container->window);
EsElementDestroy(container->taskBarButton);
if (container->taskBarButton) EsElementDestroy(container->taskBarButton);
desktop.allContainerWindows.FindAndDeleteSwap(container, true);
} else {
if (container->active == tab) {
@ -604,18 +604,24 @@ int CursorLocatorMessage(EsElement *element, EsMessage *message) {
}
int ProcessGlobalKeyboardShortcuts(EsElement *, EsMessage *message) {
if (desktop.installationState) {
// Do not process global keyboard shortcuts if the installer is running.
} else if (message->type == ES_MSG_KEY_DOWN) {
if (message->type == ES_MSG_KEY_DOWN) {
bool ctrlOnly = message->keyboard.modifiers == ES_MODIFIER_CTRL;
int scancode = ScancodeMapToLabel(message->keyboard.scancode);
uint32_t scancode = ScancodeMapToLabel(message->keyboard.scancode);
if (ctrlOnly && scancode == ES_SCANCODE_N && !message->keyboard.repeat) {
if (ctrlOnly && scancode == ES_SCANCODE_N && !message->keyboard.repeat && !desktop.installationState) {
ApplicationInstanceCreate(APPLICATION_ID_DESKTOP_BLANK_TAB, nullptr, nullptr);
} else if (message->keyboard.modifiers == (ES_MODIFIER_CTRL | ES_MODIFIER_FLAG) && scancode == ES_SCANCODE_D) {
if (!desktop.inspectorOpen) {
desktop.inspectorOpen = true;
EsThreadCreate(DesktopInspectorThread, nullptr, 0);
} else {
// TODO Close the inspector.
}
} else if (message->keyboard.modifiers == (ES_MODIFIER_CTRL | ES_MODIFIER_FLAG) && scancode == ES_SCANCODE_DELETE) {
for (uintptr_t i = 0; i < desktop.installedApplications.Length(); i++) {
if (desktop.installedApplications[i]->cName && 0 == EsCRTstrcmp(desktop.installedApplications[i]->cName, "System Monitor")) {
ApplicationInstanceCreate(desktop.installedApplications[i]->id, nullptr, nullptr);
}
}
} else {
return 0;
@ -646,11 +652,15 @@ int ContainerWindowMessage(EsElement *element, EsMessage *message) {
ContainerWindow *container = (ContainerWindow *) element->userData.p;
if (message->type == ES_MSG_WINDOW_ACTIVATED) {
container->taskBarButton->customStyleState |= THEME_STATE_SELECTED;
container->taskBarButton->MaybeRefreshStyle();
if (container->taskBarButton) {
container->taskBarButton->customStyleState |= THEME_STATE_SELECTED;
container->taskBarButton->MaybeRefreshStyle();
}
} else if (message->type == ES_MSG_WINDOW_DEACTIVATED) {
container->taskBarButton->customStyleState &= ~THEME_STATE_SELECTED;
container->taskBarButton->MaybeRefreshStyle();
if (container->taskBarButton) {
container->taskBarButton->customStyleState &= ~THEME_STATE_SELECTED;
container->taskBarButton->MaybeRefreshStyle();
}
} else if (message->type == ES_MSG_KEY_DOWN) {
bool ctrlOnly = message->keyboard.modifiers == ES_MODIFIER_CTRL;
int scancode = ScancodeMapToLabel(message->keyboard.scancode);
@ -1046,12 +1056,14 @@ ContainerWindow *ContainerWindowCreate() {
ApplicationInstanceCreate(APPLICATION_ID_DESKTOP_BLANK_TAB, nullptr, (ContainerWindow *) element->window->userData.p);
});
container->taskBarButton = (TaskBarButton *) EsHeapAllocate(sizeof(TaskBarButton), true);
container->taskBarButton->customStyleState = THEME_STATE_SELECTED;
container->taskBarButton->containerWindow = container;
container->taskBarButton->Initialise(&desktop.taskBar.taskList, ES_CELL_FILL,
TaskBarButtonMessage, ES_STYLE_TASK_BAR_BUTTON);
container->taskBarButton->cName = "task bar button";
if (!desktop.installationState) {
container->taskBarButton = (TaskBarButton *) EsHeapAllocate(sizeof(TaskBarButton), true);
container->taskBarButton->customStyleState = THEME_STATE_SELECTED;
container->taskBarButton->containerWindow = container;
container->taskBarButton->Initialise(&desktop.taskBar.taskList, ES_CELL_FILL,
TaskBarButtonMessage, ES_STYLE_TASK_BAR_BUTTON);
container->taskBarButton->cName = "task bar button";
}
return container;
}
@ -2423,7 +2435,9 @@ ApplicationInstance *ApplicationInstanceFindForeground() {
ApplicationInstance *instance = desktop.allApplicationInstances[i];
WindowTab *tab = instance->tab;
if (tab && (tab->container->taskBarButton->customStyleState & THEME_STATE_SELECTED) && tab->container->active == instance->tab) {
if (tab && tab->container->taskBarButton
&& (tab->container->taskBarButton->customStyleState & THEME_STATE_SELECTED)
&& tab->container->active == instance->tab) {
return instance;
}
}
@ -2506,6 +2520,12 @@ void DesktopSetup() {
}
}
{
EsRectangle screen;
EsSyscall(ES_SYSCALL_SCREEN_BOUNDS_GET, 0, (uintptr_t) &screen, 0, 0);
EsSyscall(ES_SYSCALL_SCREEN_WORK_AREA_SET, 0, (uintptr_t) &screen, 0, 0);
}
if (desktop.installationState == INSTALLATION_STATE_NONE) {
// Create the taskbar.
@ -2580,7 +2600,7 @@ void DesktopSetup() {
EsHeapFree(firstApplication);
}
} else if (desktop.installationState == INSTALLATION_STATE_INSTALLER) {
// Start the instller.
// Start the installer.
if (!desktop.setupDesktopUIComplete) {
ApplicationInstanceCreate(desktop.installer->id, nullptr, nullptr, true /* hidden */);
@ -2827,7 +2847,7 @@ void DesktopSyscall(EsMessage *message, uint8_t *buffer, EsBuffer *pipe) {
if (instance->tab) {
instance->tab->Repaint(true);
if (instance->tab == instance->tab->container->active) {
if (instance->tab == instance->tab->container->active && instance->tab->container->taskBarButton) {
instance->tab->container->taskBarButton->Repaint(true);
}
}

View File

@ -1896,6 +1896,10 @@ struct EsMemoryStatistics {
size_t commitRemaining;
size_t maximumObjectCachePages;
size_t approximateObjectCacheSize;
size_t countZeroedPages;
size_t countFreePages;
size_t countStandbyPages;
size_t countActivePages;
};
struct EsFontInformation {
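
The four new counters mirror the kernel fields copied out in the ES_SYSCALL_DEBUG_COMMAND hunk below, and the first three add up to what the memory manager's MM_AVAILABLE_PAGES() macro treats as available memory. A small sketch under that assumption; how a program obtains the filled structure is outside this diff:

// Sketch: totals derived from an already-filled EsMemoryStatistics.
size_t AvailablePages(const EsMemoryStatistics *statistics) {
    // Mirrors MM_AVAILABLE_PAGES(): zeroed, free, and standby frames can all
    // be repurposed immediately, so together they count as available.
    return statistics->countZeroedPages + statistics->countFreePages + statistics->countStandbyPages;
}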

View File

@ -201,6 +201,15 @@ void Device::Initialise() {
drive->access = DriveAccess;
drive->information.driveType = ES_DRIVE_TYPE_USB_MASS_STORAGE;
char buffer[256];
if (parent->GetString(parent->deviceDescriptor.productString, buffer, sizeof(buffer))) {
size_t bytes = EsCStringLength(buffer);
if (bytes > sizeof(drive->information.model)) bytes = sizeof(drive->information.model);
EsMemoryCopy(drive->information.model, buffer, bytes);
drive->information.modelBytes = bytes;
}
FSRegisterBlockDevice(drive);
}
}

View File

@ -912,13 +912,15 @@ EsError CCSpaceAccess(CCSpace *cache, K_USER_BUFFER void *_buffer, EsFileOffset
// Mark the page as active before we map it.
MMPhysicalActivatePages(entry / K_PAGE_SIZE, 1, ES_FLAGS_DEFAULT);
frame->cacheReference = cachedSection->data + pageInCachedSectionIndex;
} else if (frame->state != MMPageFrame::ACTIVE) {
KernelPanic("CCSpaceAccess - Page frame %x was neither standby nor active.\n", frame);
} else if (!frame->active.references) {
KernelPanic("CCSpaceAccess - Active page frame %x had no references.\n", frame);
}
frame->active.references++;
MMArchMapPage(kernelMMSpace, entry & ~(K_PAGE_SIZE - 1), (uintptr_t) sectionBase + i * K_PAGE_SIZE, MM_MAP_PAGE_FRAME_LOCK_ACQUIRED);
__sync_synchronize();
section->referencedPages[i >> 3] |= 1 << (i & 7);
section->referencedPageCount++;
@ -1095,10 +1097,23 @@ EsError CCSpaceAccess(CCSpace *cache, K_USER_BUFFER void *_buffer, EsFileOffset
for (uintptr_t i = start; i < end; i += K_PAGE_SIZE) {
uintptr_t physicalAddress = MMArchTranslateAddress(kernelMMSpace, (uintptr_t) sectionBase + i, false);
KMutexAcquire(&pmm.pageFrameMutex);
pmm.pageFrames[physicalAddress / K_PAGE_SIZE].active.references++;
MMPageFrame *frame = &pmm.pageFrames[physicalAddress / K_PAGE_SIZE];
if (frame->state != MMPageFrame::ACTIVE || !frame->active.references) {
KernelPanic("CCSpaceAccess - Bad active frame %x; removed while still in use by the active section.\n", frame);
}
frame->active.references++;
if (!MMArchMapPage(mapSpace, physicalAddress, (uintptr_t) buffer,
mapFlags | MM_MAP_PAGE_IGNORE_IF_MAPPED /* since this isn't locked */
| MM_MAP_PAGE_FRAME_LOCK_ACQUIRED)) {
// The page was already mapped.
// Don't need to check if this goes to zero, because the page frame mutex is still acquired.
frame->active.references--;
}
KMutexRelease(&pmm.pageFrameMutex);
MMArchMapPage(mapSpace, physicalAddress, (uintptr_t) buffer,
mapFlags | MM_MAP_PAGE_IGNORE_IF_MAPPED /* since this isn't locked */);
buffer += K_PAGE_SIZE;
}
} else if (flags & CC_ACCESS_READ) {

View File

@ -66,19 +66,22 @@ struct MMRegion {
// One per process.
struct MMSpace {
VIRTUAL_ADDRESS_SPACE_DATA(); // Architecture specific data.
AVLTree<MMRegion> // Key =
freeRegionsBase, // Base address
freeRegionsSize, // Page count
usedRegions; // Base address
LinkedList<MMRegion> usedRegionsNonGuard;
KMutex reserveMutex; // Acquire to access the region trees.
volatile int32_t referenceCount; // One per CPU using the space, and +1 while the process is alive.
// We don't bother tracking for kernelMMSpace.
bool user; // Regions in the space may be accessed from userspace.
uint64_t commit; // An *approximate* commit in pages. TODO Better memory usage tracking.
uint64_t reserve; // The number of reserved pages.
};
// A physical page of memory.
@ -158,13 +161,13 @@ struct PMM {
uintptr_t countZeroedPages, countFreePages, countStandbyPages, countActivePages;
#define MM_REMAINING_COMMIT() (pmm.commitLimit - pmm.commitPageable - pmm.commitFixed)
int64_t commitFixed, commitPageable,
commitFixedLimit, commitLimit;
// Acquire to:
KMutex commitMutex, // (Un)commit pages.
pageFrameMutex; // Allocate or free pages.
KMutex pmManipulationLock;
KSpinlock pmManipulationProcessorLock;
@ -177,12 +180,12 @@ struct PMM {
KMutex objectCacheListMutex;
// Events for when the number of available pages is low.
#define MM_AVAILABLE_PAGES() (pmm.countZeroedPages + pmm.countFreePages + pmm.countStandbyPages)
KEvent availableCritical, availableLow;
KEvent availableNotCritical;
// Event for when the object cache should be trimmed.
#define MM_OBJECT_CACHE_SHOULD_TRIM() (pmm.approximateTotalObjectCacheBytes / K_PAGE_SIZE > MM_OBJECT_CACHE_PAGES_MAXIMUM())
uintptr_t approximateTotalObjectCacheBytes;
KEvent trimObjectCaches;
@ -246,7 +249,7 @@ extern MMSpace _kernelMMSpace, _coreMMSpace;
// Architecture-dependent functions.
void MMArchMapPage(MMSpace *space, uintptr_t physicalAddress, uintptr_t virtualAddress, unsigned flags);
bool MMArchMapPage(MMSpace *space, uintptr_t physicalAddress, uintptr_t virtualAddress, unsigned flags); // Returns false if the page was already mapped.
void MMArchUnmapPages(MMSpace *space, uintptr_t virtualAddressStart, uintptr_t pageCount, unsigned flags, size_t unmapMaximum = 0, uintptr_t *resumePosition = nullptr);
void MMArchInvalidatePages(uintptr_t virtualAddressStart, uintptr_t pageCount);
bool MMArchHandlePageFault(uintptr_t address, uint32_t flags);
@ -263,7 +266,7 @@ void MMFinalizeVAS(MMSpace *space);
// Forward declarations.
bool MMHandlePageFault(MMSpace *space, uintptr_t address, unsigned flags);
bool MMUnmapFilePage(uintptr_t frameNumber, bool justLoaded = false); // Returns true if the page became inactive.
bool MMUnmapFilePage(uintptr_t frameNumber); // Returns true if the page became inactive.
// Public memory manager functions.
@ -991,8 +994,7 @@ void MMUnreserve(MMSpace *space, MMRegion *remove, bool unmapPages, bool guardRe
}
if (unmapPages) {
MMArchUnmapPages(space, remove->baseAddress,
remove->pageCount, ES_FLAGS_DEFAULT);
MMArchUnmapPages(space, remove->baseAddress, remove->pageCount, ES_FLAGS_DEFAULT);
}
space->reserve += remove->pageCount;
@ -1573,8 +1575,7 @@ bool MMFree(MMSpace *space, void *address, size_t expectedSize, bool userOnly) {
} else if (region->flags & MM_REGION_SHARED) {
sharedRegionToFree = region->data.shared.region;
} else if (region->flags & MM_REGION_FILE) {
MMArchUnmapPages(space, region->baseAddress,
region->pageCount, MM_UNMAP_PAGES_FREE_COPIED | MM_UNMAP_PAGES_BALANCE_FILE);
MMArchUnmapPages(space, region->baseAddress, region->pageCount, MM_UNMAP_PAGES_FREE_COPIED | MM_UNMAP_PAGES_BALANCE_FILE);
unmapPages = false;
FSFile *node = region->data.file.node;
@ -1651,58 +1652,58 @@ void MMSpaceDestroy(MMSpace *space) {
MMFreeVAS(space);
}
bool MMUnmapFilePage(uintptr_t frameNumber, bool justLoaded) {
bool MMUnmapFilePage(uintptr_t frameNumber) {
KMutexAssertLocked(&pmm.pageFrameMutex);
MMPageFrame *frame = pmm.pageFrames + frameNumber;
if (!justLoaded) {
if (frame->state != MMPageFrame::ACTIVE || !frame->active.references) {
KernelPanic("MMUnmapFilePage - Corrupt page frame database (%d/%x).\n", frameNumber, frame);
}
// Decrease the reference count.
frame->active.references--;
if (frame->state != MMPageFrame::ACTIVE || !frame->active.references) {
KernelPanic("MMUnmapFilePage - Corrupt page frame database (%d/%x).\n", frameNumber, frame);
}
if (!frame->active.references) {
// If there are no more references, then the frame can be moved to the standby or modified list.
// Decrease the reference count.
frame->active.references--;
// EsPrint("Unmap file page: %x\n", frameNumber << K_PAGE_BITS);
{
frame->state = MMPageFrame::STANDBY;
pmm.countStandbyPages++;
if (*frame->cacheReference != ((frameNumber << K_PAGE_BITS) | MM_SHARED_ENTRY_PRESENT)) {
KernelPanic("MMUnmapFilePage - Corrupt shared reference back pointer in frame %x.\n", frame);
}
frame->list.next = pmm.firstStandbyPage;
frame->list.previous = &pmm.firstStandbyPage;
if (pmm.firstStandbyPage) pmm.pageFrames[pmm.firstStandbyPage].list.previous = &frame->list.next;
if (!pmm.lastStandbyPage) pmm.lastStandbyPage = frameNumber;
pmm.firstStandbyPage = frameNumber;
MMUpdateAvailablePageCount(true);
}
pmm.countActivePages--;
return true;
if (frame->active.references) {
return false;
}
return false;
// If there are no more references, then the frame can be moved to the standby or modified list.
// EsPrint("Unmap file page: %x\n", frameNumber << K_PAGE_BITS);
frame->state = MMPageFrame::STANDBY;
pmm.countStandbyPages++;
if (*frame->cacheReference != ((frameNumber << K_PAGE_BITS) | MM_SHARED_ENTRY_PRESENT)) {
KernelPanic("MMUnmapFilePage - Corrupt shared reference back pointer in frame %x.\n", frame);
}
frame->list.next = pmm.firstStandbyPage;
frame->list.previous = &pmm.firstStandbyPage;
if (pmm.firstStandbyPage) pmm.pageFrames[pmm.firstStandbyPage].list.previous = &frame->list.next;
if (!pmm.lastStandbyPage) pmm.lastStandbyPage = frameNumber;
pmm.firstStandbyPage = frameNumber;
MMUpdateAvailablePageCount(true);
pmm.countActivePages--;
return true;
}
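
The list splice above is easy to misread: standby pages link by frame number rather than by pointer, and previous holds the address of whichever next slot (or the list head variable itself) refers to this frame, which removes the head special case when unlinking. A sketch of the assumed shape of the list fields; the real MMPageFrame declaration lives elsewhere in the kernel headers and may differ:

struct MMPageFrameListEntry { // Hypothetical name for MMPageFrame's embedded list struct.
    volatile uintptr_t next;      // Frame number of the next standby page.
    volatile uintptr_t *previous; // &pmm.firstStandbyPage, or the previous frame's &list.next.
};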
void MMBalanceThread() {
size_t targetAvailablePages = 0;
while (true) {
#if 1
if (MM_AVAILABLE_PAGES() >= targetAvailablePages) {
// Wait for there to be a low number of available pages.
KEventWait(&pmm.availableLow);
targetAvailablePages = MM_LOW_AVAILABLE_PAGES_THRESHOLD + MM_PAGES_TO_FIND_BALANCE;
}
#else
// Test that the balance thread works correctly by running it constantly.
targetAvailablePages = MM_AVAILABLE_PAGES() * 2;
#endif
// EsPrint("--> Balance!\n");
@ -1828,12 +1829,14 @@ void MMZeroPageThread() {
for (int j = 0; j < i; j++) pages[j] <<= K_PAGE_BITS;
if (i) PMZero(pages, i, false);
{
KMutexAcquire(&pmm.pageFrameMutex);
pmm.countActivePages -= i;
while (i--) MMPhysicalInsertZeroedPage(pages[i] >> K_PAGE_BITS);
KMutexRelease(&pmm.pageFrameMutex);
KMutexAcquire(&pmm.pageFrameMutex);
pmm.countActivePages -= i;
while (i--) {
MMPhysicalInsertZeroedPage(pages[i] >> K_PAGE_BITS);
}
KMutexRelease(&pmm.pageFrameMutex);
}
}
}
@ -1927,7 +1930,8 @@ void PMCopy(uintptr_t page, void *_source, size_t pageCount) {
void *region = pmm.pmManipulationRegion;
for (uintptr_t i = 0; i < doCount; i++) {
MMArchMapPage(vas, page + K_PAGE_SIZE * i, (uintptr_t) region + K_PAGE_SIZE * i, MM_MAP_PAGE_OVERWRITE | MM_MAP_PAGE_NO_NEW_TABLES);
MMArchMapPage(vas, page + K_PAGE_SIZE * i, (uintptr_t) region + K_PAGE_SIZE * i,
MM_MAP_PAGE_OVERWRITE | MM_MAP_PAGE_NO_NEW_TABLES);
}
KSpinlockAcquire(&pmm.pmManipulationProcessorLock);
@ -1963,7 +1967,8 @@ void PMRead(uintptr_t page, void *_source, size_t pageCount) {
void *region = pmm.pmManipulationRegion;
for (uintptr_t i = 0; i < doCount; i++) {
MMArchMapPage(vas, page + K_PAGE_SIZE * i, (uintptr_t) region + K_PAGE_SIZE * i, MM_MAP_PAGE_OVERWRITE | MM_MAP_PAGE_NO_NEW_TABLES);
MMArchMapPage(vas, page + K_PAGE_SIZE * i, (uintptr_t) region + K_PAGE_SIZE * i,
MM_MAP_PAGE_OVERWRITE | MM_MAP_PAGE_NO_NEW_TABLES);
}
KSpinlockAcquire(&pmm.pmManipulationProcessorLock);
@ -2265,6 +2270,42 @@ void MMObjectCacheTrimThread() {
}
}
void MMSpaceOpenReference(MMSpace *space) {
if (space == kernelMMSpace) {
return;
}
if (space->referenceCount < 1) {
KernelPanic("MMSpaceOpenReference - Space %x has invalid reference count.\n", space);
}
if (space->referenceCount >= K_MAX_PROCESSORS + 1) {
KernelPanic("MMSpaceOpenReference - Space %x has too many references (expected a maximum of %d).\n", K_MAX_PROCESSORS + 1);
}
__sync_fetch_and_add(&space->referenceCount, 1);
}
void MMSpaceCloseReference(MMSpace *space) {
if (space == kernelMMSpace) {
return;
}
if (space->referenceCount < 1) {
KernelPanic("MMSpaceCloseReference - Space %x has invalid reference count.\n", space);
}
if (__sync_fetch_and_sub(&space->referenceCount, 1) > 1) {
return;
}
KRegisterAsyncTask([] (EsGeneric _space) {
MMSpace *space = (MMSpace *) _space.p;
MMFinalizeVAS(space);
scheduler.mmSpacePool.Remove(space);
}, space);
}
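
Per the comment added to ProcessorSetAddressSpace's declaration in the header hunk below, direct callers must now bracket it with these reference functions; Thread::SetAddressSpace in the scheduler hunk is the in-tree example. A condensed sketch of the pattern, with a hypothetical helper name:

// Hypothetical helper showing the required pairing (compare Thread::SetAddressSpace).
void UseSpaceTemporarily(MMSpace *space) {
    MMSpaceOpenReference(space); // Pin the space while this CPU uses it.
    ProcessorSetAddressSpace(VIRTUAL_ADDRESS_SPACE_IDENTIFIER(space));
    // ... access mappings in the space ...
    ProcessorSetAddressSpace(VIRTUAL_ADDRESS_SPACE_IDENTIFIER(kernelMMSpace));
    MMSpaceCloseReference(space); // Dropping the last reference queues MMFinalizeVAS.
}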
void MMInitialise() {
{
// Initialise coreMMSpace.
@ -2297,7 +2338,7 @@ void MMInitialise() {
pmm.pmManipulationRegion = (void *) MMReserve(kernelMMSpace, PHYSICAL_MEMORY_MANIPULATION_REGION_PAGES * K_PAGE_SIZE, ES_FLAGS_DEFAULT)->baseAddress;
KMutexRelease(&kernelMMSpace->reserveMutex);
physicalMemoryHighest += K_PAGE_SIZE << 3;
physicalMemoryHighest += K_PAGE_SIZE << 3; // 1 extra for the top page, then round up so the page bitset is byte-aligned.
pmm.pageFrames = (MMPageFrame *) MMStandardAllocate(kernelMMSpace, (physicalMemoryHighest >> K_PAGE_BITS) * sizeof(MMPageFrame), MM_REGION_FIXED);
pmm.freeOrZeroedPageBitset.Initialise(physicalMemoryHighest >> K_PAGE_BITS, true);

View File

@ -161,9 +161,9 @@ extern "C" size_t ProcessorSendIPI(uintptr_t interrupt, bool nmi = false, int pr
extern "C" void ProcessorDebugOutputByte(uint8_t byte);
extern "C" void ProcessorFakeTimerInterrupt();
extern "C" uint64_t ProcessorReadTimeStamp();
extern "C" void DoContextSwitch(struct InterruptContext *context,
uintptr_t virtualAddressSpace, uintptr_t threadKernelStack, struct Thread *newThread);
extern "C" void ProcessorSetAddressSpace(uintptr_t virtualAddressSpaceIdentifier);
extern "C" void DoContextSwitch(struct InterruptContext *context, uintptr_t virtualAddressSpace, uintptr_t threadKernelStack,
struct Thread *newThread, struct MMSpace *oldAddressSpace);
extern "C" void ProcessorSetAddressSpace(uintptr_t virtualAddressSpaceIdentifier); // Need to call MMSpaceOpenReference/MMSpaceCloseReference if using this.
extern "C" uintptr_t ProcessorGetAddressSpace();
extern "C" void ProcessorFlushCodeCache();
extern "C" void ProcessorFlushCache();

View File

@ -752,6 +752,7 @@ Process *Scheduler::SpawnProcess(ProcessType processType) {
process->id = nextProcessID++;
KSpinlockRelease(&scheduler.lock);
process->vmm->referenceCount = 1;
process->allItem.thisItem = process;
process->handles = 1;
process->handleTable.process = process;
@ -769,11 +770,18 @@ Process *Scheduler::SpawnProcess(ProcessType processType) {
}
void Thread::SetAddressSpace(MMSpace *space) {
temporaryAddressSpace = space;
if (this == GetCurrentThread()) {
ProcessorSetAddressSpace(VIRTUAL_ADDRESS_SPACE_IDENTIFIER(space ? space : kernelMMSpace));
if (this != GetCurrentThread()) {
KernelPanic("Thread::SetAddressSpace - Cannot change another thread's address space.\n");
}
KSpinlockAcquire(&scheduler.lock);
MMSpace *oldSpace = temporaryAddressSpace ?: kernelMMSpace;
temporaryAddressSpace = space;
MMSpace *newSpace = space ?: kernelMMSpace;
MMSpaceOpenReference(newSpace);
ProcessorSetAddressSpace(VIRTUAL_ADDRESS_SPACE_IDENTIFIER(newSpace));
KSpinlockRelease(&scheduler.lock);
MMSpaceCloseReference(oldSpace);
}
void AsyncTaskThread() {
@ -907,9 +915,7 @@ void Scheduler::RemoveProcess(Process *process) {
KRegisterAsyncTask([] (EsGeneric _process) {
Process *process = (Process *) _process.p;
MMSpace *space = process->vmm;
if (process->executableStartRequest) MMFinalizeVAS(space);
scheduler.mmSpacePool.Remove(space);
MMSpaceCloseReference(process->vmm);
scheduler.processPool.Remove(process);
}, process);
@ -1222,12 +1228,13 @@ void Scheduler::Yield(InterruptContext *context) {
ProcessorDisableInterrupts(); // We don't want interrupts to get reenabled after the context switch.
KSpinlockAcquire(&lock);
local->currentThread->interruptContext = context;
if (lock.interruptsEnabled) {
KernelPanic("Scheduler::Yield - Interrupts were enabled when scheduler lock was acquired.\n");
}
MMSpace *oldAddressSpace = local->currentThread->temporaryAddressSpace ?: local->currentThread->process->vmm;
local->currentThread->interruptContext = context;
local->currentThread->executing = false;
bool killThread = local->currentThread->terminatableState == THREAD_TERMINATABLE
@ -1345,8 +1352,6 @@ void Scheduler::Yield(InterruptContext *context) {
uint64_t nextTimer = 1;
ArchNextTimer(nextTimer);
InterruptContext *newContext = newThread->interruptContext;
if (!local->processorID) {
// Update the scheduler's time.
#if 1
@ -1363,9 +1368,11 @@ void Scheduler::Yield(InterruptContext *context) {
^ 0x8000000000000000) | ProcessorReadTimeStamp();
}
MMSpace *addressSpace = newThread->process->vmm;
if (newThread->temporaryAddressSpace) addressSpace = newThread->temporaryAddressSpace;
DoContextSwitch(newContext, VIRTUAL_ADDRESS_SPACE_IDENTIFIER(addressSpace), newThread->kernelStack, newThread);
InterruptContext *newContext = newThread->interruptContext;
MMSpace *addressSpace = newThread->temporaryAddressSpace ?: newThread->process->vmm;
MMSpaceOpenReference(addressSpace);
DoContextSwitch(newContext, VIRTUAL_ADDRESS_SPACE_IDENTIFIER(addressSpace), newThread->kernelStack, newThread, oldAddressSpace);
KernelPanic("Scheduler::Yield - DoContextSwitch unexpectedly returned.\n");
}
void Scheduler::Shutdown() {

View File

@ -1907,6 +1907,10 @@ SYSCALL_IMPLEMENT(ES_SYSCALL_DEBUG_COMMAND) {
statistics.commitRemaining = MM_REMAINING_COMMIT();
statistics.maximumObjectCachePages = MM_OBJECT_CACHE_PAGES_MAXIMUM();
statistics.approximateObjectCacheSize = pmm.approximateTotalObjectCacheBytes;
statistics.countZeroedPages = pmm.countZeroedPages;
statistics.countFreePages = pmm.countFreePages;
statistics.countStandbyPages = pmm.countStandbyPages;
statistics.countActivePages = pmm.countActivePages;
SYSCALL_WRITE(argument1, &statistics, sizeof(statistics));
}
#endif

View File

@ -372,6 +372,9 @@ void WindowManager::PressKey(unsigned scancode) {
| ((shift | shift2) ? ES_MODIFIER_SHIFT : 0)
| ((flag | flag2) ? ES_MODIFIER_FLAG : 0);
KernelLog(LOG_VERBOSE, "WM", "press key", "WindowManager::PressKey - Received key press %x. Modifiers are %X. Keys held: %d/%d%z.\n",
scancode, modifiers, keysHeld, maximumKeysHeld, single ? " (single)" : "");
{
EsMessage message;
EsMemoryZero(&message, sizeof(EsMessage));
@ -395,7 +398,9 @@ void WindowManager::PressKey(unsigned scancode) {
keysHeldBitSet[message.keyboard.scancode / 8] &= ~(1 << (message.keyboard.scancode % 8));
}
if (activeWindow) {
if ((modifiers & ES_MODIFIER_CTRL) && (modifiers & ES_MODIFIER_FLAG)) {
desktopProcess->messageQueue.SendMessage(nullptr, &message);
} else if (activeWindow) {
SendMessageToWindow(activeWindow, &message);
} else {
desktopProcess->messageQueue.SendMessage(nullptr, &message);
@ -829,6 +834,8 @@ bool Window::Move(EsRectangle rectangle, uint32_t flags) {
return false;
}
windowManager.resizeQueued = false;
bool result = true;
isMaximised = flags & ES_WINDOW_MOVE_MAXIMIZED;
@ -916,7 +923,7 @@ bool Window::Move(EsRectangle rectangle, uint32_t flags) {
}
void EmbeddedWindow::Destroy() {
KernelLog(LOG_VERBOSE, "Window Manager", "destroy embedded window", "EmbeddedWindow::Destroy - Destroying embedded window.\n");
KernelLog(LOG_INFO, "WM", "destroy embedded window", "EmbeddedWindow::Destroy - Destroying embedded window.\n");
EsHeapFree(this, sizeof(EmbeddedWindow), K_PAGED);
}

View File

@ -51,6 +51,7 @@ struct VirtualAddressSpaceData {
uint8_t l2Commit[L2_COMMIT_SIZE_BYTES];
uint8_t l3Commit[L3_COMMIT_SIZE_BYTES];
size_t pageTablesCommitted;
size_t pageTablesActive;
// TODO Consider core/kernel mutex consistency? I think it's fine, but...
KMutex mutex; // Acquire to modify the page tables.
@ -201,13 +202,20 @@ bool MMArchMakePageWritable(MMSpace *space, uintptr_t virtualAddress) {
return true;
}
void MMArchMapPage(MMSpace *space, uintptr_t physicalAddress, uintptr_t virtualAddress, unsigned flags) {
bool MMArchMapPage(MMSpace *space, uintptr_t physicalAddress, uintptr_t virtualAddress, unsigned flags) {
// TODO Use the no-execute bit.
if (physicalAddress & (K_PAGE_SIZE - 1)) {
KernelPanic("MMArchMapPage - Physical address not page aligned.\n");
}
if (pmm.pageFrames && physicalAddress < physicalMemoryHighest) {
if (pmm.pageFrames[physicalAddress >> K_PAGE_BITS].state != MMPageFrame::ACTIVE
&& pmm.pageFrames[physicalAddress >> K_PAGE_BITS].state != MMPageFrame::UNUSABLE) {
KernelPanic("MMArchMapPage - Physical page frame %x not marked as ACTIVE or UNUSABLE.\n", physicalAddress);
}
}
bool acquireFrameLock = !(flags & (MM_MAP_PAGE_NO_NEW_TABLES | MM_MAP_PAGE_FRAME_LOCK_ACQUIRED));
if (acquireFrameLock) KMutexAcquire(&pmm.pageFrameMutex);
EsDefer(if (acquireFrameLock) KMutexRelease(&pmm.pageFrameMutex););
@ -249,6 +257,7 @@ void MMArchMapPage(MMSpace *space, uintptr_t physicalAddress, uintptr_t virtualA
PAGE_TABLE_L4[indexL4] = MMPhysicalAllocate(MM_PHYSICAL_ALLOCATE_LOCK_ACQUIRED) | 7;
ProcessorInvalidatePage((uintptr_t) (PAGE_TABLE_L3 + indexL3)); // Not strictly necessary.
EsMemoryZero((void *) ((uintptr_t) (PAGE_TABLE_L3 + indexL3) & ~(K_PAGE_SIZE - 1)), K_PAGE_SIZE);
space->data.pageTablesActive++;
}
if ((PAGE_TABLE_L3[indexL3] & 1) == 0) {
@ -256,6 +265,7 @@ void MMArchMapPage(MMSpace *space, uintptr_t physicalAddress, uintptr_t virtualA
PAGE_TABLE_L3[indexL3] = MMPhysicalAllocate(MM_PHYSICAL_ALLOCATE_LOCK_ACQUIRED) | 7;
ProcessorInvalidatePage((uintptr_t) (PAGE_TABLE_L2 + indexL2)); // Not strictly necessary.
EsMemoryZero((void *) ((uintptr_t) (PAGE_TABLE_L2 + indexL2) & ~(K_PAGE_SIZE - 1)), K_PAGE_SIZE);
space->data.pageTablesActive++;
}
if ((PAGE_TABLE_L2[indexL2] & 1) == 0) {
@ -263,6 +273,7 @@ void MMArchMapPage(MMSpace *space, uintptr_t physicalAddress, uintptr_t virtualA
PAGE_TABLE_L2[indexL2] = MMPhysicalAllocate(MM_PHYSICAL_ALLOCATE_LOCK_ACQUIRED) | 7;
ProcessorInvalidatePage((uintptr_t) (PAGE_TABLE_L1 + indexL1)); // Not strictly necessary.
EsMemoryZero((void *) ((uintptr_t) (PAGE_TABLE_L1 + indexL1) & ~(K_PAGE_SIZE - 1)), K_PAGE_SIZE);
space->data.pageTablesActive++;
}
uintptr_t oldValue = PAGE_TABLE_L1[indexL1];
@ -270,13 +281,32 @@ void MMArchMapPage(MMSpace *space, uintptr_t physicalAddress, uintptr_t virtualA
if (flags & MM_MAP_PAGE_WRITE_COMBINING) value |= 16; // This only works because we modified the PAT in SetupProcessor1.
if (flags & MM_MAP_PAGE_NOT_CACHEABLE) value |= 24;
if (flags & MM_MAP_PAGE_USER) value |= 7; else value |= 0x100;
if (flags & MM_MAP_PAGE_USER) value |= 7;
else value |= 1 << 8; // Global.
if (flags & MM_MAP_PAGE_READ_ONLY) value &= ~2;
if (flags & MM_MAP_PAGE_COPIED) value |= 1 << 9;
if (flags & MM_MAP_PAGE_COPIED) value |= 1 << 9; // Ignored by the CPU.
// When the CPU accesses or writes to a page,
// it will modify the table entry to set the accessed or dirty bits respectively,
// but it uses its TLB entry as the assumed previous value of the entry.
// When unmapping pages we can't atomically remove an entry and do the TLB shootdown.
// This creates a race condition:
// 1. CPU 0 maps a page table entry. The dirty bit is not set.
// 2. CPU 1 reads from the page. A TLB entry is created with the dirty bit not set.
// 3. CPU 0 unmaps the entry.
// 4. CPU 1 writes to the page. As the TLB entry has the dirty bit cleared, it sets the entry to its cached entry ORed with the dirty bit.
// 5. CPU 0 invalidates the entry.
// That is, CPU 1 didn't realize the page was unmapped when it wrote out its entry, so the page becomes mapped again.
// To prevent this, we mark all pages with the dirty and accessed bits when we initially map them.
// (We don't use these bits for anything, anyway. They're basically useless on SMP systems, as far as I can tell.)
// That said, a CPU won't overwrite and clear a dirty bit when writing out its accessed flag (tested on Qemu);
// see here https://stackoverflow.com/questions/69024372/.
// Tl;dr: if a CPU ever sees an entry without these bits set, it can overwrite the entry with junk whenever it feels like it.
value |= (1 << 5) | (1 << 6);
if ((oldValue & 1) && !(flags & MM_MAP_PAGE_OVERWRITE)) {
if (flags & MM_MAP_PAGE_IGNORE_IF_MAPPED) {
return;
return false;
}
if ((oldValue & ~(K_PAGE_SIZE - 1)) != physicalAddress) {
@ -299,6 +329,8 @@ void MMArchMapPage(MMSpace *space, uintptr_t physicalAddress, uintptr_t virtualA
// We rely on this page being invalidated on this CPU in some places.
ProcessorInvalidatePage(oldVirtualAddress);
return true;
}
bool MMArchIsBufferInUserRange(uintptr_t baseAddress, size_t byteCount) {
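
For reference, the numeric literals in MMArchMapPage above are the standard x86-64 page-table entry bits; a sketch with illustrative names (the kernel writes the raw values):

#define PTE_PRESENT  (1UL << 0) // "| 7" on new tables = present | write | user.
#define PTE_WRITE    (1UL << 1)
#define PTE_USER     (1UL << 2)
#define PTE_PWT      (1UL << 3) // "value |= 24" sets PWT | PCD: not cacheable.
#define PTE_PCD      (1UL << 4) // "value |= 16" picks write-combining via the repurposed PAT.
#define PTE_ACCESSED (1UL << 5) // Pre-set together with dirty to avoid the TLB race above.
#define PTE_DIRTY    (1UL << 6)
#define PTE_GLOBAL   (1UL << 8) // Kernel mappings; skipped for user pages.
#define PTE_COPIED   (1UL << 9) // OS-available bit, ignored by the CPU.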
@ -434,8 +466,6 @@ void MMArchUnmapPages(MMSpace *space, uintptr_t virtualAddressStart, uintptr_t p
// - What do we need to invalidate when we do this?
for (uintptr_t i = start; i < pageCount; i++) {
// if (flags & MM_UNMAP_PAGES_BALANCE_FILE) EsPrint(",%d", i);
uintptr_t virtualAddress = (i << K_PAGE_BITS) + tableBase;
if ((PAGE_TABLE_L4[virtualAddress >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 3)] & 1) == 0) {
@ -459,34 +489,40 @@ void MMArchUnmapPages(MMSpace *space, uintptr_t virtualAddressStart, uintptr_t p
uintptr_t indexL1 = virtualAddress >> (K_PAGE_BITS + ENTRIES_PER_PAGE_TABLE_BITS * 0);
uintptr_t translation = PAGE_TABLE_L1[indexL1];
if (!(translation & 1)) continue;
if (!(translation & 1)) {
// The page wasn't mapped.
continue;
}
bool copy = translation & (1 << 9);
if (copy && (flags & MM_UNMAP_PAGES_BALANCE_FILE)) {
if (copy && (flags & MM_UNMAP_PAGES_BALANCE_FILE) && (~flags & MM_UNMAP_PAGES_FREE_COPIED)) {
// Ignore copied pages when balancing file mappings.
// EsPrint("Ignore copied page %x\n", virtualAddress);
} else {
PAGE_TABLE_L1[indexL1] = 0;
continue;
}
// NOTE MMArchInvalidatePages invalidates the page on all processors now,
// which I think makes this unnecessary?
// uint64_t invalidateAddress = (i << K_PAGE_BITS) + virtualAddressStart;
// ProcessorInvalidatePage(invalidateAddress);
if ((~translation & (1 << 5)) || (~translation & (1 << 6))) {
// See MMArchMapPage for a discussion of why these bits must be set.
KernelPanic("MMArchUnmapPages - Page found without accessed or dirty bit set (virtualAddress: %x, translation: %x).\n",
virtualAddress, translation);
}
if ((flags & MM_UNMAP_PAGES_FREE) || ((flags & MM_UNMAP_PAGES_FREE_COPIED) && copy)) {
MMPhysicalFree(translation & 0x0000FFFFFFFFF000, true);
} else if (flags & MM_UNMAP_PAGES_BALANCE_FILE) {
// EsPrint("Balance %x\n", virtualAddress);
PAGE_TABLE_L1[indexL1] = 0;
// It's safe to do this before invalidation,
// because the page fault handler is synchronisation with mutexes acquired above.
uintptr_t physicalAddress = translation & 0x0000FFFFFFFFF000;
if (MMUnmapFilePage((translation & 0x0000FFFFFFFFF000) >> K_PAGE_BITS)) {
if (resumePosition) {
if (!unmapMaximum--) {
*resumePosition = i;
break;
}
if ((flags & MM_UNMAP_PAGES_FREE) || ((flags & MM_UNMAP_PAGES_FREE_COPIED) && copy)) {
MMPhysicalFree(physicalAddress, true);
} else if (flags & MM_UNMAP_PAGES_BALANCE_FILE) {
// It's safe to do this before page invalidation,
// because the page fault handler is synchronised with the same mutexes acquired above.
if (MMUnmapFilePage(physicalAddress >> K_PAGE_BITS)) {
if (resumePosition) {
if (!unmapMaximum--) {
*resumePosition = i;
break;
}
}
}
@ -569,23 +605,35 @@ void MMFreeVAS(MMSpace *space) {
for (uintptr_t k = j * 512; k < (j + 1) * 512; k++) {
if (!PAGE_TABLE_L2[k]) continue;
MMPhysicalFree(PAGE_TABLE_L2[k] & (~0xFFF));
space->data.pageTablesActive--;
}
MMPhysicalFree(PAGE_TABLE_L3[j] & (~0xFFF));
space->data.pageTablesActive--;
}
MMPhysicalFree(PAGE_TABLE_L4[i] & (~0xFFF));
space->data.pageTablesActive--;
}
if (space->data.pageTablesActive) {
KernelPanic("MMFreeVAS - Space %x still has %d page tables active.\n", space, space->data.pageTablesActive);
}
KMutexAcquire(&coreMMSpace->reserveMutex);
MMUnreserve(coreMMSpace, MMFindRegion(coreMMSpace, (uintptr_t) space->data.l1Commit), true);
MMRegion *l1CommitRegion = MMFindRegion(coreMMSpace, (uintptr_t) space->data.l1Commit);
MMArchUnmapPages(coreMMSpace, l1CommitRegion->baseAddress, l1CommitRegion->pageCount, MM_UNMAP_PAGES_FREE);
MMUnreserve(coreMMSpace, l1CommitRegion, false /* we manually unmap pages above, so we can free them */);
KMutexRelease(&coreMMSpace->reserveMutex);
MMDecommit(space->data.pageTablesCommitted * K_PAGE_SIZE, true);
}
void MMFinalizeVAS(MMSpace *space) {
// Freeing the L4 page table has to be done in the kernel process, since it's the page CR3 currently points to!!
// This function is called in an async task.
if (!space->data.cr3) return;
// Freeing the L4 page table has to be done in the kernel process, since it's the page CR3 would point to!
// Therefore, this function is only called in an async task.
if (space->data.cr3 == ProcessorReadCR3()) KernelPanic("MMFinalizeVAS - Space %x is active.\n", space);
PMZero(&space->data.cr3, 1, true); // Fail as fast as possible if someone's still using this page.
MMPhysicalFree(space->data.cr3);
MMDecommit(K_PAGE_SIZE, true);
}
@ -1198,7 +1246,7 @@ extern "C" void InterruptHandler(InterruptContext *context) {
}
}
extern "C" bool PostContextSwitch(InterruptContext *context) {
extern "C" bool PostContextSwitch(InterruptContext *context, MMSpace *oldAddressSpace) {
CPULocalStorage *local = GetLocalStorage();
Thread *currentThread = GetCurrentThread();
@ -1231,12 +1279,15 @@ extern "C" bool PostContextSwitch(InterruptContext *context) {
// We can only free the scheduler's spinlock when we are no longer using the stack
// from the previous thread. See DoContextSwitch in x86_64.s.
// (Another CPU can KillThread this once it's back in activeThreads.)
KSpinlockRelease(&scheduler.lock, true);
if (ProcessorAreInterruptsEnabled()) {
KernelPanic("PostContextSwitch - Interrupts were enabled. (2)\n");
}
MMSpaceCloseReference(oldAddressSpace);
return newThread;
}

View File

@ -807,6 +807,7 @@ DoContextSwitch:
mov cr3,rsi
.cont:
mov rsp,rdi
mov rsi,r8
call PostContextSwitch
jmp ReturnFromInterruptHandler

View File

@ -308,7 +308,8 @@ void Compile(uint32_t flags, int partitionSize, const char *volumeLabel) {
}
if (flags & COMPILE_DO_BUILD) {
fprintf(f, "[install]\nfile=bin/drive\npartition_size=%d\npartition_label=%s\n\n", partitionSize, volumeLabel ?: "Essence HD");
fprintf(f, "[install]\nfile=bin/drive\npartition_size=%d\npartition_label=%s\n\n",
partitionSize, volumeLabel ?: strcmp(GetOptionString("General.installation_state"), "0") ? "Essence Installer" : "Essence HD");
}
fclose(f);
@ -431,7 +432,7 @@ void Run(int emulator, int log, int debug) {
const char *usbImage = GetOptionString("Emulator.USBImage");
char usbFlags[256];
if (usbImage && usbImage[0]) {
snprintf(usbFlags, sizeof(usbFlags), " -drive if=none,id=stick,file=%s -device usb-storage,bus=xhci.0,drive=stick ", usbImage);
snprintf(usbFlags, sizeof(usbFlags), " -drive if=none,format=raw,id=stick,file=%s -device usb-storage,bus=xhci.0,drive=stick ", usbImage);
} else {
usbFlags[0] = 0;
}
@ -1006,6 +1007,16 @@ void GatherFilesForInstallerArchive(FILE *file, const char *path1, const char *p
DIR *directory = opendir(path);
struct dirent *entry;
{
// Make sure empty folders are preserved.
uint64_t length = (uint64_t) -1L;
fwrite(&length, 1, sizeof(length), file);
uint16_t pathBytes = strlen(path2);
fwrite(&pathBytes, 1, sizeof(pathBytes), file);
fwrite(path2, 1, pathBytes, file);
printf("Directory: %s\n", path2);
}
while ((entry = readdir(directory))) {
if (0 == strcmp(entry->d_name, ".") || 0 == strcmp(entry->d_name, "..")) {
continue;
@ -1040,13 +1051,13 @@ void GatherFilesForInstallerArchive(FILE *file, const char *path1, const char *p
closedir(directory);
}
void BuildAndRun(int optimise, bool compile, int debug, int emulator) {
void BuildAndRun(int optimise, bool compile, int debug, int emulator, int log) {
Build(optimise, compile);
if (encounteredErrors) {
printf("Errors were encountered during the build.\n");
} else if (emulator != -1) {
Run(emulator, LOG_NORMAL, debug);
Run(emulator, log, debug);
}
}
@ -1067,27 +1078,35 @@ void DoCommand(const char *l) {
}
if (0 == strcmp(l, "b") || 0 == strcmp(l, "build")) {
BuildAndRun(OPTIMISE_OFF, true /* compile */, false /* debug */, -1);
BuildAndRun(OPTIMISE_OFF, true /* compile */, false /* debug */, -1, LOG_NORMAL);
} else if (0 == strcmp(l, "opt") || 0 == strcmp(l, "build-optimised")) {
BuildAndRun(OPTIMISE_ON, true /* compile */, false /* debug */, -1);
BuildAndRun(OPTIMISE_ON, true /* compile */, false /* debug */, -1, LOG_NORMAL);
} else if (0 == strcmp(l, "d") || 0 == strcmp(l, "debug")) {
BuildAndRun(OPTIMISE_OFF, true /* compile */, true /* debug */, EMULATOR_QEMU);
BuildAndRun(OPTIMISE_OFF, true /* compile */, true /* debug */, EMULATOR_QEMU, LOG_NORMAL);
} else if (0 == strcmp(l, "d3") || 0 == strcmp(l, "debug-without-compile")) {
BuildAndRun(OPTIMISE_OFF, false /* compile */, true /* debug */, EMULATOR_QEMU);
BuildAndRun(OPTIMISE_OFF, false /* compile */, true /* debug */, EMULATOR_QEMU, LOG_NORMAL);
} else if (0 == strcmp(l, "v") || 0 == strcmp(l, "vbox")) {
BuildAndRun(OPTIMISE_ON, true /* compile */, false /* debug */, EMULATOR_VIRTUALBOX);
BuildAndRun(OPTIMISE_ON, true /* compile */, false /* debug */, EMULATOR_VIRTUALBOX, LOG_NORMAL);
} else if (0 == strcmp(l, "v2") || 0 == strcmp(l, "vbox-without-opt")) {
BuildAndRun(OPTIMISE_OFF, true /* compile */, false /* debug */, EMULATOR_VIRTUALBOX);
BuildAndRun(OPTIMISE_OFF, true /* compile */, false /* debug */, EMULATOR_VIRTUALBOX, LOG_NORMAL);
} else if (0 == strcmp(l, "v3") || 0 == strcmp(l, "vbox-without-compile")) {
BuildAndRun(OPTIMISE_OFF, false /* compile */, false /* debug */, EMULATOR_VIRTUALBOX);
BuildAndRun(OPTIMISE_OFF, false /* compile */, false /* debug */, EMULATOR_VIRTUALBOX, LOG_NORMAL);
} else if (0 == strcmp(l, "t") || 0 == strcmp(l, "qemu-with-opt")) {
BuildAndRun(OPTIMISE_ON, true /* compile */, false /* debug */, EMULATOR_QEMU);
BuildAndRun(OPTIMISE_ON, true /* compile */, false /* debug */, EMULATOR_QEMU, LOG_NORMAL);
} else if (0 == strcmp(l, "t2") || 0 == strcmp(l, "test")) {
BuildAndRun(OPTIMISE_OFF, true /* compile */, false /* debug */, EMULATOR_QEMU);
BuildAndRun(OPTIMISE_OFF, true /* compile */, false /* debug */, EMULATOR_QEMU, LOG_NORMAL);
} else if (0 == strcmp(l, "t3") || 0 == strcmp(l, "qemu-without-compile")) {
BuildAndRun(OPTIMISE_OFF, false /* compile */, false /* debug */, EMULATOR_QEMU);
BuildAndRun(OPTIMISE_OFF, false /* compile */, false /* debug */, EMULATOR_QEMU, LOG_NORMAL);
} else if (0 == strcmp(l, "e")) {
Run(EMULATOR_QEMU, LOG_NORMAL, DEBUG_LATER);
} else if (0 == strcmp(l, "k") || 0 == strcmp(l, "qemu-with-kvm")) {
BuildAndRun(OPTIMISE_FULL, true /* compile */, DEBUG_NONE /* debug */, EMULATOR_QEMU);
BuildAndRun(OPTIMISE_FULL, true /* compile */, DEBUG_NONE /* debug */, EMULATOR_QEMU, LOG_NORMAL);
} else if (0 == strcmp(l, "kno")) {
BuildAndRun(OPTIMISE_ON, true /* compile */, DEBUG_NONE /* debug */, EMULATOR_QEMU, LOG_NORMAL);
} else if (0 == strcmp(l, "klv")) {
BuildAndRun(OPTIMISE_FULL, true /* compile */, DEBUG_NONE /* debug */, EMULATOR_QEMU, LOG_VERBOSE);
} else if (0 == strcmp(l, "tlv")) {
BuildAndRun(OPTIMISE_ON, true /* compile */, DEBUG_LATER /* debug */, EMULATOR_QEMU, LOG_VERBOSE);
} else if (0 == strcmp(l, "exit") || 0 == strcmp(l, "x") || 0 == strcmp(l, "quit") || 0 == strcmp(l, "q")) {
exit(0);
} else if (0 == strcmp(l, "compile") || 0 == strcmp(l, "c")) {