mirror of https://codeberg.org/pzp/pzp-db.git
log: improve overwrite() with scheduled flushes
commit 9e7feb3d41
parent 2f527613c2
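In plain terms: del() already staged modified blocks in blocksWithDeletables and flushed them with a debounced flushDelete(), while overwrite() still called writeWithFSync() on every call. This commit renames that machinery to "overwrites" (blocksWithOverwritables, flushOverwrites(), onOverwritesFlushed()) and routes overwrite() through it too, so both operations share one debounced write path and repeated writes to the same block coalesce into a single write+fsync. A minimal sketch of the pattern, assuming a plain setTimeout-based debounce; the names makeScheduledFlusher, writeBlock, and markDirty are illustrative, not pzp-db internals:

// Sketch only: coalesce dirty blocks in a Map, flush after a quiet period.
// `writeBlock` stands in for the real writeWithFSync().
function makeScheduledFlusher(writeBlock, writeTimeout) {
  const dirtyBlocks = new Map() // blockIndex -> blockBuf
  const waiting = [] // callbacks from onFlushed(), run once everything is on disk
  let flushing = false
  let timer = null

  function schedule() {
    // Debounce: every new dirty block restarts the countdown to the flush.
    clearTimeout(timer)
    timer = setTimeout(flush, writeTimeout)
  }

  function flush() {
    if (dirtyBlocks.size === 0) {
      for (const cb of waiting) cb()
      waiting.length = 0
      return
    }
    const [blockIndex, blockBuf] = dirtyBlocks.entries().next().value
    dirtyBlocks.delete(blockIndex)
    flushing = true
    writeBlock(blockIndex, blockBuf, (err) => {
      flushing = false
      if (err) {
        for (const cb of waiting) cb(err)
        waiting.length = 0
        return
      }
      flush() // keep draining, one block at a time
    })
  }

  return {
    markDirty(blockIndex, blockBuf) {
      dirtyBlocks.set(blockIndex, blockBuf) // repeat writes to a block coalesce here
      schedule()
    },
    onFlushed(cb) {
      // Mirrors onOverwritesFlushed(): wait only if work is pending or running.
      if (flushing || dirtyBlocks.size > 0) waiting.push(cb)
      else cb()
    },
  }
}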
@@ -103,12 +103,12 @@ function Log(filename, opts) {
   /** @type {Map<BlockIndex, Array<CallableFunction>>} */
   const waitingDrain = new Map() // blockIndex -> []
   /** @type {Array<CB<any>>} */
-  const waitingFlushDelete = []
+  const waitingFlushOverwrites = []
   /** @type {Map<BlockIndex, {blockBuf: B4A; offset: number}>} */
   const blocksToBeWritten = new Map() // blockIndex -> { blockBuf, offset }
   /** @type {Map<BlockIndex, B4A>} */
-  const blocksWithDeletables = new Map() // blockIndex -> blockBuf
-  let flushingDelete = false
+  const blocksWithOverwritables = new Map() // blockIndex -> blockBuf
+  let flushingOverwrites = false
   let writingBlockIndex = -1

   let latestBlockBuf = /** @type {B4A | null} */ (null)
@@ -473,18 +473,20 @@ function Log(filename, opts) {
      (err, blockBuf) => {
        if (err) return cb(err)
        assert(blockBuf, 'blockBuf should be defined in gotBlockForDelete')
-       const actualBlockBuf = blocksWithDeletables.get(blockIndex) ?? blockBuf
+       const blockBufNow = blocksWithOverwritables.get(blockIndex) ?? blockBuf
        const offsetInBlock = getOffsetInBlock(offset)
-       Record.overwriteAsEmpty(actualBlockBuf, offsetInBlock)
-       deletedBytes += Record.readSize(actualBlockBuf, offsetInBlock)
-       blocksWithDeletables.set(blockIndex, actualBlockBuf)
-       scheduleFlushDelete()
+       Record.overwriteAsEmpty(blockBufNow, offsetInBlock)
+       deletedBytes += Record.readSize(blockBufNow, offsetInBlock)
+       blocksWithOverwritables.set(blockIndex, blockBufNow)
+       scheduleFlushOverwrites()
        cb()
      }
    )

-   if (blocksWithDeletables.has(blockIndex)) {
-     const blockBuf = /** @type {any} */ (blocksWithDeletables.get(blockIndex))
+   if (blocksWithOverwritables.has(blockIndex)) {
+     const blockBuf = /** @type {any} */ (
+       blocksWithOverwritables.get(blockIndex)
+     )
      gotBlockForDelete(null, blockBuf)
    } else {
      getBlock(offset, gotBlockForDelete)
@@ -499,40 +501,46 @@ function Log(filename, opts) {
     return offsetInBlock + Record.size(dataBuf) + Record.EOB_SIZE > blockSize
   }

-  const scheduleFlushDelete = debounce(flushDelete, writeTimeout)
+  const scheduleFlushOverwrites = debounce(flushOverwrites, writeTimeout)

-  function flushDelete() {
-    if (blocksWithDeletables.size === 0) {
-      for (const cb of waitingFlushDelete) cb()
-      waitingFlushDelete.length = 0
+  function flushOverwrites() {
+    if (blocksWithOverwritables.size === 0) {
+      for (const cb of waitingFlushOverwrites) cb()
+      waitingFlushOverwrites.length = 0
       return
     }
-    const blockIndex = blocksWithDeletables.keys().next().value
+    const blockIndex = blocksWithOverwritables.keys().next().value
     const blockStart = blockIndex * blockSize
-    const blockBuf = blocksWithDeletables.get(blockIndex)
-    blocksWithDeletables.delete(blockIndex)
-    flushingDelete = true
+    const blockBuf = blocksWithOverwritables.get(blockIndex)
+    blocksWithOverwritables.delete(blockIndex)
+    flushingOverwrites = true

-    writeWithFSync(blockStart, blockBuf, null, function flushedDelete(err, _) {
-      saveStats(function onSavedStats(err, _) {
-        if (err) debug('error saving stats: %s', err.message)
-        flushingDelete = false
-        if (err) {
-          for (const cb of waitingFlushDelete) cb(err)
-          waitingFlushDelete.length = 0
-          return
-        }
-        flushDelete() // next
-      })
-    })
+    writeWithFSync(
+      blockStart,
+      blockBuf,
+      null,
+      function flushedOverwrites(err, _) {
+        if (err) debug('error flushing overwrites with fsync: %s', err.message)
+        saveStats(function onSavedStats(err, _) {
+          if (err) debug('error saving stats: %s', err.message)
+          flushingOverwrites = false
+          if (err) {
+            for (const cb of waitingFlushOverwrites) cb(err)
+            waitingFlushOverwrites.length = 0
+            return
+          }
+          flushOverwrites() // next
+        })
+      }
+    )
   }

   /**
    * @param {CB<void>} cb
    */
-  function onDeletesFlushed(cb) {
-    if (flushingDelete || blocksWithDeletables.size > 0) {
-      waitingFlushDelete.push(cb)
+  function onOverwritesFlushed(cb) {
+    if (flushingOverwrites || blocksWithOverwritables.size > 0) {
+      waitingFlushOverwrites.push(cb)
     } else cb()
   }

@@ -650,7 +658,7 @@ function Log(filename, opts) {
   /**
    * @param {number} offset
    * @param {extractCodecType<typeof codec>} data
-   * @param {CB<null>} cb
+   * @param {CB<void>} cb
    */
   function overwrite(offset, data, cb) {
     let encodedData = codec.encode(data)
@@ -659,6 +667,7 @@ function Log(filename, opts) {
     assert(typeof latestBlockIndex === 'number', 'latestBlockIndex not set')
     assert(typeof nextOffsetInBlock === 'number', 'nextOffsetInBlock not set')
     const logSize = latestBlockIndex * blockSize + nextOffsetInBlock
+    const blockIndex = getBlockIndex(offset)
     if (typeof offset !== 'number') return cb(nanOffsetErr(offset))
     if (isNaN(offset)) return cb(nanOffsetErr(offset))
     if (offset < 0) return cb(negativeOffsetErr(offset))
@@ -667,9 +676,10 @@ function Log(filename, opts) {
     // Get the existing record at offset
     getBlock(offset, function gotBlock(err, blockBuf) {
       if (err) return cb(err)
+      const blockBufNow = blocksWithOverwritables.get(blockIndex) ?? blockBuf
       const offsetInBlock = getOffsetInBlock(offset)
-      const oldDataLength = Record.readDataLength(blockBuf, offsetInBlock)
-      const oldEmptyLength = Record.readEmptyLength(blockBuf, offsetInBlock)
+      const oldDataLength = Record.readDataLength(blockBufNow, offsetInBlock)
+      const oldEmptyLength = Record.readEmptyLength(blockBufNow, offsetInBlock)
       // Make sure encodedData fits inside existing record
       if (Record.size(encodedData) > oldDataLength + oldEmptyLength) {
         return cb(overwriteLargerThanOld())
@@ -677,10 +687,10 @@ function Log(filename, opts) {
       const newEmptyLength = oldDataLength - encodedData.length
       deletedBytes += newEmptyLength
       // write
-      Record.write(blockBuf, offsetInBlock, encodedData, newEmptyLength)
-      const blockStart = getBlockStart(offset)
-      writeWithFSync(blockStart, blockBuf, null, cb)
+      Record.write(blockBufNow, offsetInBlock, encodedData, newEmptyLength)
+      blocksWithOverwritables.set(blockIndex, blockBufNow)
+      scheduleFlushOverwrites()
+      cb()
     })
   }

@@ -755,7 +765,7 @@ function Log(filename, opts) {
   //   }
   //   onStreamsDone(function startCompactAfterStreamsDone() {
   //     onDrain(function startCompactAfterDrain() {
-  //       onDeletesFlushed(function startCompactAfterDeletes() {
+  //       onOverwritesFlushed(function startCompactAfterDeletes() {
   //         if (compactionProgress.value.done) {
   //           compactionProgress.set({ percent: 0, done: false })
   //         }
@@ -793,7 +803,7 @@ function Log(filename, opts) {
    */
   function close(cb) {
     onDrain(function closeAfterHavingDrained() {
-      onDeletesFlushed(function closeAfterDeletesFlushed() {
+      onOverwritesFlushed(function closeAfterOverwritesFlushed() {
         raf.close(cb)
       })
     })
@@ -854,7 +864,7 @@ function Log(filename, opts) {
     overwrite: onLoad(overwrite), // TODO
     close: onLoad(close), // TODO
     onDrain: onLoad(onDrain), // TODO
-    onDeletesFlushed: onLoad(onDeletesFlushed),
+    onOverwritesFlushed: onLoad(onOverwritesFlushed),
     // compact: onLoad(compact), // FIXME:
     // compactionProgress,
     since,

@@ -37,7 +37,7 @@ test('Log deletes', async (t) => {
   assert.equal(buf3.toString(), msg3.toString())

   await p(log.del)(offset2)
-  await p(log.onDeletesFlushed)()
+  await p(log.onOverwritesFlushed)()
   await assert.rejects(p(log._get)(offset2), (err) => {
     assert.ok(err)
     assert.equal(err.message, 'Record has been deleted')
@@ -80,7 +80,7 @@ test('Log deletes', async (t) => {
   const offset3 = await p(log.append)({ text: 'm2' })

   await p(log.del)(offset2)
-  await p(log.onDeletesFlushed)()
+  await p(log.onOverwritesFlushed)()

   await p(log.close)()

@@ -122,7 +122,7 @@ test('Log deletes', async (t) => {

   await p(log.del)(offset1)
   await p(log.onDrain)()
-  await p(log.onDeletesFlushed)()
+  await p(log.onOverwritesFlushed)()

   const arr = []
   await new Promise((resolve) => {
@@ -166,7 +166,7 @@ test('Log deletes', async (t) => {
   if (process.env.VERBOSE) console.timeEnd('delete ' + TOTAL / 2)
   assert('deleted messages')

-  await p(log.onDeletesFlushed)()
+  await p(log.onOverwritesFlushed)()

   await new Promise((resolve) => {
     let i = 0
@@ -26,6 +26,7 @@ test('Log overwrites', async (t) => {
   assert.equal(buf2.toString(), msg2.toString())

   await p(log.overwrite)(offset1, Buffer.from('hi world'))
+  await p(log.onOverwritesFlushed)()
   const buf = await p(log._get)(offset1)
   assert.equal(buf.toString(), 'hi world')

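One behavioral consequence, visible in the updated overwrite test above: overwrite()'s callback now fires as soon as the block is staged and the flush is scheduled, not after the data has been fsynced. Callers that need read-after-write durability must add the flush barrier themselves (p is the promisify helper the tests already use):

  await p(log.overwrite)(offset1, Buffer.from('hi world'))
  await p(log.onOverwritesFlushed)() // barrier: the debounced fsync has completed
  const buf = await p(log._get)(offset1)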