mirror of https://codeberg.org/pzp/pzp-db.git

log: implement naive overwrite()

parent 3636acaaa3
commit 153e75da8e
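A minimal sketch of how the new overwrite() API can be used, adapted from the test added in this commit (the log file path and block size here are illustrative only):

  const p = require('node:util').promisify
  const Log = require('./lib/log')

  async function demo() {
    const log = Log('/tmp/pzp-db-overwrite-demo.log', { blockSize: 2 * 1024 })

    const offset = await p(log.append)(Buffer.from('hello world hello world'))

    // Naive overwrite: the new data is written into the existing record slot,
    // so it must not be larger than the existing data, otherwise the callback
    // receives the new overwriteLargerThanOld() error.
    await p(log.overwrite)(offset, Buffer.from('hi world'))

    const buf = await p(log._get)(offset)
    console.log(buf.toString()) // 'hi world'

    await p(log.close)()
  }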
@@ -53,6 +53,11 @@ function compactWithMaxLiveStreamErr() {
   return new Error('Compaction cannot run if there are live streams configured with opts.lt or opts.lte')
 }
 
+function overwriteLargerThanOld() {
+  // prettier-ignore
+  return new Error('Data to be overwritten should not be larger than existing data')
+}
+
 function appendLargerThanBlockErr() {
   return new Error('Data to be appended is larger than block size')
 }
@@ -70,6 +75,7 @@ module.exports = {
   deletedRecordErr,
   delDuringCompactErr,
   compactWithMaxLiveStreamErr,
+  overwriteLargerThanOld,
   appendLargerThanBlockErr,
   unexpectedTruncationErr,
 }
lib/log/index.js (107 changed lines)
@@ -17,6 +17,7 @@ const {
   // delDuringCompactErr,
   appendLargerThanBlockErr,
   unexpectedTruncationErr,
+  overwriteLargerThanOld,
   // compactWithMaxLiveStreamErr,
 } = require('./errors')
 const Record = require('./record')
@@ -67,17 +68,6 @@ function assert(check, message) {
   if (!check) throw new Error(message)
 }
 
-/**
- * The "End of Block" is a special field used to mark the end of a block, and
- * in practice it's like a Record header "length" field, with the value 0.
- * In most cases, the end region of a block will have a larger length than this,
- * but we want to guarantee there is at *least* this many bytes at the end.
- */
-const EOB = {
-  SIZE: Record.HEADER_SIZE,
-  asNumber: 0,
-}
-
 const DEFAULT_BLOCK_SIZE = 65536
 const DEFAULT_WRITE_TIMEOUT = 250
 const DEFAULT_VALIDATE = () => true
@@ -89,7 +79,7 @@ const DEFAULT_VALIDATE = () => true
  * @param {string} filename
  * @param {Options<T>} opts
  */
-function AsyncAppendOnlyLog(filename, opts) {
+function Log(filename, opts) {
   const DEFAULT_CODEC = /** @type {Codec<T>} */ (
     /** @type {any} */ ({
       encode: (/** @type {any} */ x) => x,
@@ -327,18 +317,17 @@ function AsyncAppendOnlyLog(filename, opts) {
    */
   function getLastGoodRecord(blockBuf, blockStart, cb) {
     let lastGoodOffset = 0
-    for (let offsetInRecord = 0; offsetInRecord < blockSize; ) {
-      const length = Record.readDataLength(blockBuf, offsetInRecord)
-      if (length === EOB.asNumber) break
-      const [dataBuf, recSize] = Record.read(blockBuf, offsetInRecord)
-      const isLengthCorrupt = offsetInRecord + recSize > blockSize
-      const isDataCorrupt = !isBufferZero(dataBuf) && !validateRecord(dataBuf)
+    for (let offsetInRec = 0; offsetInRec < blockSize; ) {
+      if (Record.isEOB(blockBuf, offsetInRec)) break
+      const [dataBuf, recSize, dataLength] = Record.read(blockBuf, offsetInRec)
+      const isLengthCorrupt = offsetInRec + recSize > blockSize
+      const isDataCorrupt = dataLength > 0 && !validateRecord(dataBuf)
       if (isLengthCorrupt || isDataCorrupt) {
-        fixBlock(blockBuf, offsetInRecord, blockStart, lastGoodOffset, cb)
+        fixBlock(blockBuf, offsetInRec, blockStart, lastGoodOffset, cb)
         return
       }
-      lastGoodOffset = offsetInRecord
-      offsetInRecord += recSize
+      lastGoodOffset = offsetInRec
+      offsetInRec += recSize
     }
 
     cb(null, lastGoodOffset)
@@ -388,8 +377,12 @@ function AsyncAppendOnlyLog(filename, opts) {
 
     getBlock(offset, function gotBlock(err, blockBuf) {
       if (err) return cb(err)
-      const [dataBuf] = Record.read(blockBuf, getOffsetInBlock(offset))
-      if (isBufferZero(dataBuf)) return cb(deletedRecordErr())
+      const offsetInBlock = getOffsetInBlock(offset)
+      const [dataBuf, _recSize, dataLength, emptyLength] = Record.read(
+        blockBuf,
+        offsetInBlock
+      )
+      if (dataLength === 0 && emptyLength > 0) return cb(deletedRecordErr())
       // @ts-ignore
       cb(null, codec.decode(dataBuf))
     })
@@ -407,18 +400,21 @@ function AsyncAppendOnlyLog(filename, opts) {
    */
   function getDataNextOffset(blockBuf, offset) {
     const offsetInBlock = getOffsetInBlock(offset)
-    const [dataBuf, recSize] = Record.read(blockBuf, offsetInBlock)
-    const nextLength = Record.readDataLength(blockBuf, offsetInBlock + recSize)
+    const [dataBuf, recSize, dataLength, emptyLength] = Record.read(
+      blockBuf,
+      offsetInBlock
+    )
+    const nextOffsetInBlock = offsetInBlock + recSize
 
     let nextOffset
-    if (nextLength === EOB.asNumber) {
+    if (Record.isEOB(blockBuf, nextOffsetInBlock)) {
       if (getNextBlockStart(offset) > since.value) nextOffset = -1
       else nextOffset = 0
     } else {
       nextOffset = offset + recSize
     }
 
-    if (isBufferZero(dataBuf)) return [nextOffset, null, recSize]
+    if (dataLength === 0 && emptyLength > 0) return [nextOffset, null, recSize]
     else return [nextOffset, codec.decode(dataBuf), recSize]
   }
 
@@ -478,11 +474,9 @@ function AsyncAppendOnlyLog(filename, opts) {
       if (err) return cb(err)
       assert(blockBuf, 'blockBuf should be defined in gotBlockForDelete')
       const actualBlockBuf = blocksWithDeletables.get(blockIndex) ?? blockBuf
-      Record.overwriteWithZeroes(actualBlockBuf, getOffsetInBlock(offset))
-      deletedBytes += Record.readSize(
-        actualBlockBuf,
-        getOffsetInBlock(offset)
-      )
+      const offsetInBlock = getOffsetInBlock(offset)
+      Record.overwriteAsEmpty(actualBlockBuf, offsetInBlock)
+      deletedBytes += Record.readSize(actualBlockBuf, offsetInBlock)
       blocksWithDeletables.set(blockIndex, actualBlockBuf)
       scheduleFlushDelete()
       cb()
@@ -502,7 +496,7 @@ function AsyncAppendOnlyLog(filename, opts) {
    * @param {number} offsetInBlock
    */
   function hasNoSpaceFor(dataBuf, offsetInBlock) {
-    return offsetInBlock + Record.size(dataBuf) + EOB.SIZE > blockSize
+    return offsetInBlock + Record.size(dataBuf) + Record.EOB_SIZE > blockSize
   }
 
   const scheduleFlushDelete = debounce(flushDelete, writeTimeout)
@@ -550,8 +544,9 @@ function AsyncAppendOnlyLog(filename, opts) {
     let encodedData = codec.encode(data)
     if (typeof encodedData === 'string') encodedData = b4a.from(encodedData)
 
-    if (Record.size(encodedData) + EOB.SIZE > blockSize)
+    if (Record.size(encodedData) + Record.EOB_SIZE > blockSize) {
       throw appendLargerThanBlockErr()
+    }
 
     assert(typeof latestBlockIndex === 'number', 'latestBlockIndex not set')
     assert(typeof nextOffsetInBlock === 'number', 'nextOffsetInBlock not set')
@@ -646,12 +641,49 @@ function AsyncAppendOnlyLog(filename, opts) {
    * @param {B4A} blockBuf
    * @param {CB<null>} cb
    */
-  function overwrite(blockIndex, blockBuf, cb) {
+  function overwriteBlock(blockIndex, blockBuf, cb) {
     cache.set(blockIndex, blockBuf)
     const blockStart = blockIndex * blockSize
     writeWithFSync(blockStart, blockBuf, null, cb)
   }
 
+  /**
+   * @param {number} offset
+   * @param {extractCodecType<typeof codec>} data
+   * @param {CB<null>} cb
+   */
+  function overwrite(offset, data, cb) {
+    let encodedData = codec.encode(data)
+    if (typeof encodedData === 'string') encodedData = b4a.from(encodedData)
+
+    assert(typeof latestBlockIndex === 'number', 'latestBlockIndex not set')
+    assert(typeof nextOffsetInBlock === 'number', 'nextOffsetInBlock not set')
+    const logSize = latestBlockIndex * blockSize + nextOffsetInBlock
+    if (typeof offset !== 'number') return cb(nanOffsetErr(offset))
+    if (isNaN(offset)) return cb(nanOffsetErr(offset))
+    if (offset < 0) return cb(negativeOffsetErr(offset))
+    if (offset >= logSize) return cb(outOfBoundsOffsetErr(offset, logSize))
+
+    // Get the existing record at offset
+    getBlock(offset, function gotBlock(err, blockBuf) {
+      if (err) return cb(err)
+      const offsetInBlock = getOffsetInBlock(offset)
+      const oldDataLength = Record.readDataLength(blockBuf, offsetInBlock)
+      const oldEmptyLength = Record.readEmptyLength(blockBuf, offsetInBlock)
+      // Make sure encodedData fits inside existing record
+      if (Record.size(encodedData) > oldDataLength + oldEmptyLength) {
+        return cb(overwriteLargerThanOld())
+      }
+      const newEmptyLength = oldDataLength - encodedData.length
+      deletedBytes += newEmptyLength
+      // write
+      Record.write(blockBuf, offsetInBlock, encodedData, newEmptyLength)
+
+      const blockStart = getBlockStart(offset)
+      writeWithFSync(blockStart, blockBuf, null, cb)
+    })
+  }
+
   /**
    * @param {number} newLatestBlockIndex
    * @param {CB<number>} cb
@@ -819,6 +851,7 @@ function AsyncAppendOnlyLog(filename, opts) {
     scan: onLoad(scan), // TODO
     del: onLoad(del), // TODO
     append: onLoad(append), // TODO
+    overwrite: onLoad(overwrite), // TODO
     close: onLoad(close), // TODO
     onDrain: onLoad(onDrain), // TODO
     onDeletesFlushed: onLoad(onDeletesFlushed),
@@ -830,7 +863,7 @@ function AsyncAppendOnlyLog(filename, opts) {
     // Internals needed by ./compaction.js:
     filename,
     blockSize,
-    overwrite,
+    overwriteBlock,
     truncate,
     hasNoSpaceFor,
 
@@ -839,4 +872,4 @@ function AsyncAppendOnlyLog(filename, opts) {
   }
 }
 
-module.exports = AsyncAppendOnlyLog
+module.exports = Log
@@ -8,20 +8,22 @@ const b4a = require('b4a')
 Binary format for a Record:
 
 <record>
-  <dataLength: UInt16LE>
-  <dataBuf: Arbitrary Bytes>
+  <dataLength: UInt16LE><emptyLength: UInt16LE>
+  <dataBuf: Arbitrary Bytes or empty Bytes>
 </record>
 
 The "Header" is the first two bytes for the dataLength.
 */
 
-const HEADER_SIZE = 2 // uint16
+const HEADER_D = 2 // uint16
+const HEADER_E = 2 // uint16
+const HEADER_SIZE = HEADER_D + HEADER_E // uint16
 
 /**
  * @param {B4A} dataBuf
  */
 function size(dataBuf) {
-  return HEADER_SIZE + dataBuf.length
+  return HEADER_D + HEADER_E + dataBuf.length
 }
 
 /**
@@ -41,54 +43,127 @@ function readDataLength(blockBuf, offsetInBlock) {
  * @param {B4A} blockBuf
  * @param {number} offsetInBlock
  */
-function readSize(blockBuf, offsetInBlock) {
-  const dataLength = readDataLength(blockBuf, offsetInBlock)
-  return HEADER_SIZE + dataLength
+function readEmptyLength(blockBuf, offsetInBlock) {
+  const view = new DataView(
+    blockBuf.buffer,
+    blockBuf.byteOffset,
+    blockBuf.byteLength
+  )
+  return view.getUint16(offsetInBlock + 2, true)
 }
 
 /**
  * @param {B4A} blockBuf
  * @param {number} offsetInBlock
- * @returns {[B4A, number]}
  */
+function isEmpty(blockBuf, offsetInBlock) {
+  return (
+    readDataLength(blockBuf, offsetInBlock) === 0 &&
+    readEmptyLength(blockBuf, offsetInBlock) > 0
+  )
+}
+
+// const EOB = {
+//   SIZE: Record.HEADER_SIZE,
+//   asNumber: 0,
+// }
+
+/**
+ * The "End of Block" is a special field 4-bytes-long used to mark the end of a
+ * block, and in practice it's like a Record header "dataLength" and
+ * "emptyLength" fields both with the value 0.
+ *
+ * In most cases, the end region of a block will be much more than 4 bytes of
+ * zero, but we want to guarantee there is at *least* 4 bytes at the end.
+ * @param {B4A} blockBuf
+ * @param {number} offsetInBlock
+ */
+function isEOB(blockBuf, offsetInBlock) {
+  return (
+    readDataLength(blockBuf, offsetInBlock) === 0 &&
+    readEmptyLength(blockBuf, offsetInBlock) === 0
+  )
+}
+
+/**
+ * @param {B4A} blockBuf
+ * @param {number} offsetInBlock
+ */
+function readSize(blockBuf, offsetInBlock) {
+  const dataLength = readDataLength(blockBuf, offsetInBlock)
+  const emptyLength = readEmptyLength(blockBuf, offsetInBlock)
+  return HEADER_D + HEADER_E + dataLength + emptyLength
+}
+
+/**
+ * @param {B4A} blockBuf
+ * @param {number} offsetInBlock
+ * @returns {[B4A, number, number, number]}
+ */
 function read(blockBuf, offsetInBlock) {
   const dataLength = readDataLength(blockBuf, offsetInBlock)
-  const dataStart = offsetInBlock + HEADER_SIZE
-  const dataBuf = blockBuf.slice(dataStart, dataStart + dataLength)
-  const size = HEADER_SIZE + dataLength
-  return [dataBuf, size]
+  const emptyLength = readEmptyLength(blockBuf, offsetInBlock)
+  const dataStart = offsetInBlock + HEADER_D + HEADER_E
+  const dataBuf = blockBuf.subarray(dataStart, dataStart + dataLength)
+  const size = HEADER_D + HEADER_E + dataLength + emptyLength
+  return [dataBuf, size, dataLength, emptyLength]
 }
 
 /**
  * @param {B4A} blockBuf
  * @param {number} offsetInBlock
  * @param {B4A} dataBuf
+ * @param {number} emptySize
  */
-function write(blockBuf, offsetInBlock, dataBuf) {
-  // write dataLength
-  const view = new DataView(blockBuf.buffer, blockBuf.byteOffset, blockBuf.byteLength)
-  view.setUint16(offsetInBlock, dataBuf.length, true)
-  // write dataBuf
-  b4a.copy(dataBuf, blockBuf, offsetInBlock + HEADER_SIZE)
+function write(blockBuf, offsetInBlock, dataBuf, emptySize = 0) {
+  const dataSize = dataBuf.length
+  const dataHeaderPos = offsetInBlock
+  const emptyHeaderPos = dataHeaderPos + HEADER_D
+  const dataBodyPos = emptyHeaderPos + HEADER_E
+  const emptyBodyPos = dataBodyPos + dataSize
+
+  // write header
+  {
+    const view = new DataView(
+      blockBuf.buffer,
+      blockBuf.byteOffset,
+      blockBuf.byteLength
+    )
+    view.setUint16(dataHeaderPos, dataSize, true)
+    if (emptySize > 0) {
+      view.setUint16(emptyHeaderPos, emptySize, true)
+    }
+  }
+
+  // write body
+  {
+    if (dataSize > 0) {
+      b4a.copy(dataBuf, blockBuf, dataBodyPos)
+    }
+    if (emptySize > 0) {
+      b4a.fill(blockBuf, 0, emptyBodyPos, emptyBodyPos + emptySize)
+    }
+  }
 }
 
 /**
  * @param {B4A} blockBuf
  * @param {number} offsetInBlock
  */
-function overwriteWithZeroes(blockBuf, offsetInBlock) {
+function overwriteAsEmpty(blockBuf, offsetInBlock) {
   const dataLength = readDataLength(blockBuf, offsetInBlock)
-  const dataStart = offsetInBlock + HEADER_SIZE
-  const dataEnd = dataStart + dataLength
-  blockBuf.fill(0, dataStart, dataEnd)
+  write(blockBuf, offsetInBlock, b4a.alloc(0), dataLength)
 }
 
 module.exports = {
-  HEADER_SIZE,
+  EOB_SIZE: HEADER_D + HEADER_E,
   size,
   readDataLength,
+  readEmptyLength,
   readSize,
   read,
   write,
-  overwriteWithZeroes,
+  overwriteAsEmpty,
+  isEmpty,
+  isEOB,
 }
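For context on the test-expectation changes further down: the Record header grows from 2 bytes (dataLength only) to 4 bytes (dataLength plus emptyLength), so every record now occupies 2 more bytes on disk. A small worked sketch of the arithmetic (the helper names here are illustrative, not part of the commit):

  // Old layout:  [dataLength: UInt16LE][dataBuf]
  // New layout:  [dataLength: UInt16LE][emptyLength: UInt16LE][dataBuf][empty bytes]
  function oldRecordSize(dataBuf) { return 2 + dataBuf.length }
  function newRecordSize(dataBuf) { return 2 + 2 + dataBuf.length }

  // Hence, in the tests below, the second record now starts at msg1.length + 4
  // instead of msg1.length + 2, and the add() test's totalBytes grows from 900
  // to 904 (2 extra header bytes per appended record).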
@@ -177,6 +177,8 @@ The `prev` array for a tangle should list:
 
 Whenever we need to serialize any JSON in the context of creating a Feed V1 message, we follow the "JSON Canonicalization Scheme" (JSC) defined by [RFC 8785](https://tools.ietf.org/html/rfc8785).
 
+A serialized msg must not be larger than 65535 UTF-8 bytes.
+
 # Msg V2
 
 Background: https://github.com/ssbc/ssb2-discussion-forum/issues/24
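The 65535-byte ceiling presumably lines up with the log's record header, where dataLength is a UInt16LE (maximum value 65535) and a whole record must also fit inside the default 65536-byte block. A minimal sketch of how a producer could check the limit before appending (the helper is hypothetical, and plain JSON.stringify is used as a stand-in for full JCS canonicalization):

  // Hypothetical guard; not part of this commit.
  function assertMsgWithinLimit(msg) {
    const size = Buffer.byteLength(JSON.stringify(msg), 'utf8')
    if (size > 65535) {
      throw new Error(`Serialized msg is ${size} bytes, larger than 65535`)
    }
  }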
@@ -50,7 +50,7 @@ test('add()', async (t) => {
 
   await p(peer.db._getLog().onDrain)()
   const stats = await p(peer.db.logStats)()
-  assert.deepEqual(stats, { totalBytes: 900, deletedBytes: 0 })
+  assert.deepEqual(stats, { totalBytes: 904, deletedBytes: 0 })
 
   await p(peer.close)(true)
 })
@@ -19,7 +19,7 @@ test('Log basics', async function (t) {
   assert.equal(offset1, 0)
 
   const offset2 = await p(log.append)(msg2)
-  assert.equal(offset2, msg1.length + 2)
+  assert.equal(offset2, msg1.length + 4)
 
   const b1 = await p(log._get)(offset1)
   assert.equal(b1.toString(), msg1.toString())
@@ -47,7 +47,7 @@ test('Log basics', async function (t) {
   assert.equal(offset1, 0)
 
   const offset2 = await p(log.append)(json2)
-  assert.equal(offset2, 20)
+  assert.equal(offset2, 22)
 
   const rec1 = await p(log._get)(offset1)
   assert.deepEqual(rec1, json1)
@@ -66,12 +66,12 @@ test('Log basics', async function (t) {
   })
 
   await p(log.onDrain)()
-  assert.equal(log.since.value, 20)
+  assert.equal(log.since.value, 22)
 
   const rec1 = await p(log._get)(0)
   assert.deepEqual(rec1, json1)
 
-  const rec2 = await p(log._get)(20)
+  const rec2 = await p(log._get)(22)
   assert.deepEqual(rec2, json2)
 
   await p(log.close)()
@@ -107,9 +107,9 @@ test('Log handles corrupted length', async (t) => {
   const msg2 = encode({ bool: true, test: 'testing2' })
 
   block.writeUInt16LE(msg1.length, 0)
-  msg1.copy(block, 2)
-  block.writeUInt16LE(65534, 2 + msg1.length) // corrupt!
-  msg2.copy(block, 2 + msg1.length + 2)
+  msg1.copy(block, 4)
+  block.writeUInt16LE(65534, 4 + msg1.length) // corrupt!
+  msg2.copy(block, 4 + msg1.length + 4)
 
   await p(raf.write.bind(raf))(0, block)
 
@@ -22,7 +22,7 @@ test('Log fix buggy write', async (t) => {
   const offset1 = await p(log.append)(msg1)
   assert.equal(offset1, 0)
   const offset2 = await p(log.append)(msg2)
-  assert.equal(offset2, 36)
+  assert.equal(offset2, 38)
 
   await p(log.onDrain)()
   let arr = []
@@ -0,0 +1,79 @@
+const test = require('node:test')
+const assert = require('node:assert')
+const fs = require('node:fs')
+const p = require('node:util').promisify
+const Log = require('../../lib/log')
+
+const msg1 = Buffer.from('hello world hello world hello world')
+const msg2 = Buffer.from('ola mundo ola mundo ola mundo')
+
+test('Log overwrites', async (t) => {
+  await t.test('Simple overwrite', async (t) => {
+    const file = '/tmp/ppppp-db-log-test-overwrite.log'
+    try {
+      fs.unlinkSync(file)
+    } catch (_) {}
+    const log = Log(file, { blockSize: 2 * 1024 })
+
+    const offset1 = await p(log.append)(msg1)
+    assert.equal(offset1, 0)
+    const offset2 = await p(log.append)(msg2)
+    assert.ok(offset2 > offset1)
+
+    const buf1 = await p(log._get)(offset1)
+    assert.equal(buf1.toString(), msg1.toString())
+    const buf2 = await p(log._get)(offset2)
+    assert.equal(buf2.toString(), msg2.toString())
+
+    await p(log.overwrite)(offset1, Buffer.from('hi world'))
+    const buf = await p(log._get)(offset1)
+    assert.equal(buf.toString(), 'hi world')
+
+    let arr = []
+    await new Promise((resolve, reject) => {
+      log.scan(
+        (offset, data, size) => {
+          arr.push(data.toString())
+        },
+        (err) => {
+          if (err) reject(err)
+          else resolve()
+        }
+      )
+    })
+
+    assert.deepEqual(arr, ['hi world', 'ola mundo ola mundo ola mundo'])
+
+    await p(log.close)()
+  })
+
+  await t.test('Cannot overwrite larger data', async (t) => {
+    const file = '/tmp/ppppp-db-log-test-overwrite-larger.log'
+    try {
+      fs.unlinkSync(file)
+    } catch (_) {}
+    const log = Log(file, { blockSize: 2 * 1024 })
+
+    const offset1 = await p(log.append)(msg1)
+    assert.equal(offset1, 0)
+    const offset2 = await p(log.append)(msg2)
+    assert.ok(offset2 > offset1)
+
+    const buf1 = await p(log._get)(offset1)
+    assert.equal(buf1.toString(), msg1.toString())
+    const buf2 = await p(log._get)(offset2)
+    assert.equal(buf2.toString(), msg2.toString())
+
+    const promise = p(log.overwrite)(
+      offset1,
+      Buffer.from('hello world hello world hello world hello world')
+    )
+    await assert.rejects(promise, (err) => {
+      assert.ok(err)
+      assert.match(err.message, /should not be larger than existing data/)
+      return true
+    })
+
+    await p(log.close)()
+  })
+})