support erase() persistence

Andre Staltz 2023-11-10 15:46:17 +02:00
parent 9e7feb3d41
commit ae122c815e
No known key found for this signature in database
GPG Key ID: 9EDE23EA7E8A4890
5 changed files with 99 additions and 87 deletions
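In short: per-record bookkeeping (offset, size, seq) moves off the record objects and into a WeakMap registry keyed by the record itself, deleted slots in the in-memory recs array become plain null, and erase() now survives a restart by rewriting the erased record in place in the append-only log rather than only patching it in memory. A condensed sketch of the new erase() flow, using the names that appear in the diff below (getRecord, recs, log, MsgV3); the surrounding module wiring is assumed:

// Sketch of the pattern this commit adopts (not a drop-in copy):
// misc data lives in a WeakMap instead of on rec.misc, and erase()
// persists by overwriting the record at its original log offset.
const miscRegistry = new WeakMap() // rec -> { offset, size, seq }

function erase(msgID, cb) {
  const rec = getRecord(msgID)
  if (!rec?.msg?.data) return cb()
  rec.msg = MsgV3.erase(rec.msg) // erase the msg's data portion
  const misc = miscRegistry.get(rec)
  if (!misc) return cb(new Error('record not found in miscRegistry'))
  recs[misc.seq] = rec
  log.onDrain(() => {
    // rewrite the record in place so the erase survives a restart
    log.overwrite(misc.offset, rec, cb)
  })
}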

View File

@@ -5,7 +5,8 @@ const MsgV3 = require('./msg-v3')
 /**
  * @typedef {import('./index').Msg} Msg
  * @typedef {import('./index').RecPresent} RecPresent
- * @typedef {RecPresent['misc']} Misc
+ * @typedef {import('./index').Rec} Rec
+ * @typedef {import('./index').Misc} Misc
  * @typedef {import('ppppp-keypair').Keypair} Keypair
  *
  * @typedef {Buffer | Uint8Array} B4A
@@ -45,11 +46,11 @@ function keypairToSSBKeys(keypair) {
 const decryptCache = new WeakMap()
 
 /**
- * @template {{msg: Msg, misc?: Misc}} T
+ * @template {{msg: Msg}} T
  * @param {T} rec
  * @param {any} peer
  * @param {any} config
- * @returns {T & {misc?: Misc}}
+ * @returns {T}
  */
 function decrypt(rec, peer, config) {
   if (decryptCache.has(rec)) return decryptCache.get(rec)
@@ -73,7 +74,7 @@ function decrypt(rec, peer, config) {
     ...rec,
     msg: msgDecrypted,
     misc: {
-      ...rec.misc,
+      // ...rec.misc,
       private: true,
      originalData: data,
       encryptionFormat: encryptionFormat.name,
@@ -87,20 +88,20 @@ function decrypt(rec, peer, config) {
  * @param {RecPresent} rec
  * @returns {RecPresent}
  */
-function reEncrypt(rec) {
-  return {
-    id: rec.id,
-    msg: { ...rec.msg, data: rec.misc.originalData },
-    received: rec.received,
-    misc: {
-      seq: rec.misc.seq,
-      offset: rec.misc.offset,
-      size: rec.misc.size,
-    },
-  }
-}
+// function reEncrypt(rec) {
+//   return {
+//     id: rec.id,
+//     msg: { ...rec.msg, data: rec.misc.originalData },
+//     received: rec.received,
+//     misc: {
+//       seq: rec.misc.seq,
+//       offset: rec.misc.offset,
+//       size: rec.misc.size,
+//     },
+//   }
+// }
 
 module.exports = {
   decrypt,
-  reEncrypt,
+  // reEncrypt,
 }
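Both the existing decryptCache and the new miscRegistry (introduced in the next file) rely on the same idiom: a WeakMap keyed by the record object, which attaches extra data to a record without mutating it and without keeping the record alive once it is dropped. A tiny standalone illustration of the idiom; the names here are made up for the example:

// Illustration of a WeakMap-keyed side table, the idiom used by
// decryptCache above and by the new miscRegistry.
const sideTable = new WeakMap()

function remember(rec, extra) {
  sideTable.set(rec, extra) // the rec object itself is the key
}

function recall(rec) {
  return sideTable.get(rec) // undefined if rec was never registered
}

const rec = { msg: { data: 'hello' } }
remember(rec, { offset: 0, size: 123, seq: 0 })
recall(rec) // => { offset: 0, size: 123, seq: 0 }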

View File

@@ -34,11 +34,6 @@ const { decrypt } = require('./encryption')
  *   id?: never;
  *   msg?: never;
  *   received?: never;
- *   misc: {
- *     offset: number;
- *     size: number;
- *     seq: number;
- *   };
  * }} RecDeleted
  *
  * @typedef {{
@@ -51,15 +46,16 @@ const { decrypt } = require('./encryption')
  *   id: MsgID;
  *   msg: Msg;
  *   received: number;
- *   misc: {
+ * }} RecPresent
+ *
+ * @typedef {{
  *   offset: number;
  *   size: number;
  *   seq: number;
  *   private?: boolean;
  *   originalData?: any;
  *   encryptionFormat?: string;
- *   }
- * }} RecPresent
+ * }} Misc
  *
  * @typedef {RecPresent | RecDeleted} Rec
  */
@@ -127,9 +123,12 @@ class DBTangle extends MsgV3.Tangle {
  * @param {{ path: string; keypair: Keypair; }} config
  */
 function initDB(peer, config) {
-  /** @type {Array<Rec>} */
+  /** @type {Array<Rec | null>} */
   const recs = []
 
+  /** @type {WeakMap<Rec, Misc>} */
+  const miscRegistry = new WeakMap()
+
   /** @type {Map<string, EncryptionFormat>} */
   const encryptionFormats = new Map()
@@ -181,26 +180,21 @@ function initDB(peer, config) {
   // setTimeout to let peer.db.* secret-stack become available
   // needed by decrypt()
   setTimeout(() => {
-    let i = -1
+    let seq = -1
     log.scan(
       function scanEach(offset, recInLog, size) {
-        i += 1
+        seq += 1
         if (!recInLog) {
           // deleted record
-          /** @type {RecDeleted} */
-          const rec = { misc: { offset, size, seq: i } }
-          recs.push(rec)
+          recs.push(null)
           return
         }
         // TODO: for performance, dont decrypt on startup, instead decrypt on
         // demand, or decrypt in the background. Or then store the log with
         // decrypted msgs and only encrypt when moving it to the network.
         /** @type {RecPresent} */
-        const rec = /** @type {any} */ (decrypt(recInLog, peer, config))
-        rec.misc ??= /** @type {Rec['misc']} */ ({})
-        rec.misc.offset = offset
-        rec.misc.size = size
-        rec.misc.seq = i
+        const rec = decrypt(recInLog, peer, config)
+        miscRegistry.set(rec, { offset, size, seq })
         recs.push(rec)
       },
       function scanEnd(err) {
@@ -227,11 +221,11 @@ function initDB(peer, config) {
       if (err) return cb(new Error('logAppend failed', { cause: err }))
       const size = b4a.from(JSON.stringify(recInLog), 'utf8').length
       const seq = recs.length
+      // FIXME: where do we put originalData ???
       const recExposed = decrypt(recInLog, peer, config)
-      const rec = /** @type {RecPresent} */ (recInLog);
-      rec.misc = { offset, size, seq }
-      recExposed.misc = {...recExposed.misc, ...rec.misc}
-      recs.push(/** @type {any} */ (recExposed))
+      const rec = /** @type {RecPresent} */ (recInLog)
+      miscRegistry.set(rec, { offset, size, seq })
+      recs.push(recExposed)
       cb(null, rec)
     })
   }
@@ -938,8 +932,13 @@ function initDB(peer, config) {
     const rec = getRecord(msgID)
     if (!rec) return cb()
     if (!rec.msg) return cb()
-    const { offset, size, seq } = rec.misc
-    recs[rec.misc.seq] = { misc: { offset, size, seq } }
+    const misc = miscRegistry.get(rec)
+    const seq = misc?.seq ?? -1
+    const offset = misc?.offset ?? -1
+    if (seq === -1) {
+      return cb(new Error('del() failed to find record in miscRegistry'))
+    }
+    recs[seq] = null
     log.onDrain(() => {
       log.del(offset, cb)
     })
@@ -1022,9 +1021,17 @@ function initDB(peer, config) {
     if (!rec) return cb()
     if (!rec.msg) return cb()
     if (!rec.msg.data) return cb()
-    recs[rec.misc.seq].msg = MsgV3.erase(rec.msg)
-    // FIXME: persist this change to disk!! Not supported by AAOL yet
-    cb()
+    rec.msg = MsgV3.erase(rec.msg)
+    const misc = miscRegistry.get(rec)
+    const seq = misc?.seq ?? -1
+    const offset = misc?.offset ?? -1
+    if (seq === -1) {
+      return cb(new Error('erase() failed to find record in miscRegistry'))
+    }
+    recs[seq] = rec
+    log.onDrain(() => {
+      log.overwrite(offset, rec, cb)
+    })
   }
 
   /**
@@ -1038,7 +1045,7 @@ function initDB(peer, config) {
   function* msgs() {
     for (let i = 0; i < recs.length; i++) {
       const rec = recs[i]
-      if (rec.msg) yield rec.msg
+      if (rec?.msg) yield rec.msg
     }
   }

View File

@@ -681,7 +681,7 @@ function Log(filename, opts) {
     const oldDataLength = Record.readDataLength(blockBufNow, offsetInBlock)
     const oldEmptyLength = Record.readEmptyLength(blockBufNow, offsetInBlock)
     // Make sure encodedData fits inside existing record
-    if (Record.size(encodedData) > oldDataLength + oldEmptyLength) {
+    if (encodedData.length > oldDataLength + oldEmptyLength) {
      return cb(overwriteLargerThanOld())
     }
     const newEmptyLength = oldDataLength - encodedData.length
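The fit check drops from Record.size(encodedData) to the bare encodedData.length: on an overwrite the existing record's header presumably stays in place, so only the new payload has to fit into the space the old record already occupies (its current data length plus any empty padding). A sketch of the check under that assumed slot layout:

// Assumed slot layout: [header][data: oldDataLength][padding: oldEmptyLength].
// Overwriting reuses the slot, so only the payload must fit in data + padding.
function fitsInSlot(encodedData, oldDataLength, oldEmptyLength) {
  return encodedData.length <= oldDataLength + oldEmptyLength
}

fitsInSlot(Buffer.from('{"x":1}'), 32, 0) // => true (7 <= 32)
fitsInSlot(Buffer.alloc(64), 32, 16)      // => false (64 > 48)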

View File

@@ -68,40 +68,44 @@ test('erase()', async (t) => {
   await p(peer.close)(true)
 
-  // FIXME:
-  // const log = AAOL(path.join(DIR, 'db.bin'), {
-  //   cacheSize: 1,
-  //   blockSize: 64 * 1024,
-  //   codec: {
-  //     encode(msg) {
-  //       return Buffer.from(JSON.stringify(msg), 'utf8')
-  //     },
-  //     decode(buf) {
-  //       return JSON.parse(buf.toString('utf8'))
-  //     },
-  //   },
-  // })
+  const log = Log(path.join(DIR, 'db.bin'), {
+    cacheSize: 1,
+    blockSize: 64 * 1024,
+    codec: {
+      encode(msg) {
+        return Buffer.from(JSON.stringify(msg), 'utf8')
+      },
+      decode(buf) {
+        return JSON.parse(buf.toString('utf8'))
+      },
+    },
+  })
 
-  // const persistedMsgs = await new Promise((resolve, reject) => {
-  //   let persistedMsgs = []
-  //   log.stream({ offsets: true, values: true, sizes: true }).pipe(
-  //     push.drain(
-  //       function drainEach({ offset, value, size }) {
-  //         if (value) {
-  //           persistedMsgs.push(value.msg)
-  //         }
-  //       },
-  //       function drainEnd(err) {
-  //         if (err) return reject(err)
-  //         resolve(persistedMsgs)
-  //       }
-  //     )
-  //   )
-  // })
+  const persistedMsgs = await new Promise((resolve, reject) => {
+    let persistedMsgs = []
+    log.scan(
+      function drainEach(offset, rec, size) {
+        if (rec) {
+          persistedMsgs.push(rec.msg)
+        }
+      },
+      function drainEnd(err) {
+        if (err) return reject(err)
+        resolve(persistedMsgs)
+      }
+    )
+  })
 
-  // t.deepEqual(
-  //   persistedMsgs.filter((msg) => msg.content).map((msg) => msg.content.text),
-  //   ['m0', 'm1', 'm3', 'm4'],
-  //   'msgs in disk after the delete'
-  // )
+  const afterReopen = []
+  for (const msg of persistedMsgs) {
+    if (msg.data && msg.metadata.account?.length > 4) {
+      afterReopen.push(msg.data.text)
+    }
+  }
+
+  assert.deepEqual(
+    afterReopen,
+    ['m0', 'm1', 'm3', 'm4'],
+    '4 msgs after the erase'
+  )
 })

View File

@@ -35,7 +35,7 @@ test('records() iterator', async (t) => {
   for (const rec of peer.db.records()) {
     if (!rec.msg.data) continue
     if (rec.msg.metadata.account === 'self') continue
-    assert.ok(rec.misc.size > rec.msg.metadata.dataSize, 'size > dataSize')
+    assert.ok(rec.received, 'received')
     count++
   }
   assert.equal(count, 6)