improve worker logging and move blob deletion to worker processes as well

ansuz
2020-03-27 13:38:27 -04:00
parent 5f69fc18d0
commit 9dbd32758a
6 changed files with 386 additions and 331 deletions


@@ -10,8 +10,22 @@ const Meta = require("../metadata");
 const Pins = require("../pins");
 const Core = require("../commands/core");
 const Saferphore = require("saferphore");
+const Logger = require("../log");
 
-const Env = {};
+const Env = {
+    Log: {},
+};
+
+// support the usual log API but pass it to the main process
+Logger.levels.forEach(function (level) {
+    Env.Log[level] = function (label, info) {
+        process.send({
+            log: level,
+            label: label,
+            info: info,
+        });
+    };
+});
 
 var ready = false;
 var store;
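
Worker processes have no handle on the logger owned by the main process, so this hunk gives them stubs with the same (label, info) API that forward each entry over the child-process IPC channel. A minimal sketch of the receiving side, assuming the parent distinguishes forwarded log entries from command responses by the presence of a log field (this handler is an assumption, not part of the diff):

    // hypothetical parent-side handler, e.g. in lib/workers/index.js;
    // Env.Log here is the real logger held by the main process
    worker.on('message', function (res) {
        if (res && res.log) {
            // a forwarded entry: replay it at the reported level
            return void Env.Log[res.log](res.label, res.info);
        }
        // otherwise this is a command response, matched by txid below
    });
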
@@ -57,10 +71,6 @@ const init = function (config, _cb) {
     });
 };
 
-const tryParse = function (Env, str) {
-    try { return JSON.parse(str); } catch (err) { }
-};
-
 /* computeIndex
     can call back with an error or a computed index which includes:
      * cpIndex:
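
The local tryParse helper is deleted here, and the call sites below switch to HK.tryParse, presumably the same helper hoisted into the shared hk-util module so that one copy serves both the main process and the workers. A sketch of what that shared version plausibly looks like, based on the deleted function and on the fact that it receives Env (the error label is an assumption):

    // presumably in lib/hk-util.js (HK being that module's export object);
    // taking Env lets the helper log parse failures instead of silently
    // swallowing them, which is why every call site passes Env along
    HK.tryParse = function (Env, str) {
        try {
            return JSON.parse(str);
        } catch (err) {
            Env.Log.error('HK_PARSE_ERROR', err && err.message);
        }
    };
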
@@ -107,7 +117,7 @@ const computeIndex = function (data, cb) {
         // but only check for metadata on the first line
         if (!i && msgObj.buff.indexOf('{') === 0) {
             i++; // always increment the message counter
-            msg = tryParse(Env, msgObj.buff.toString('utf8'));
+            msg = HK.tryParse(Env, msgObj.buff.toString('utf8'));
             if (typeof msg === "undefined") { return readMore(); }
 
             // validate that the current line really is metadata before storing it as such
@@ -116,7 +126,7 @@ const computeIndex = function (data, cb) {
         }
         i++;
         if (msgObj.buff.indexOf('cp|') > -1) {
-            msg = msg || tryParse(Env, msgObj.buff.toString('utf8'));
+            msg = msg || HK.tryParse(Env, msgObj.buff.toString('utf8'));
             if (typeof msg === "undefined") { return readMore(); }
             // cache the offsets of checkpoints if they can be parsed
             if (msg[2] === 'MSG' && msg[4].indexOf('cp|') === 0) {
@@ -142,7 +152,7 @@ const computeIndex = function (data, cb) {
         // once indexing is complete you should have a buffer of messages since the latest checkpoint
         // map the 'hash' of each message to its byte offset in the log, to be used for reconnecting clients
         messageBuf.forEach((msgObj) => {
-            const msg = tryParse(Env, msgObj.buff.toString('utf8'));
+            const msg = HK.tryParse(Env, msgObj.buff.toString('utf8'));
            if (typeof msg === "undefined") { return; }
            if (msg[0] === 0 && msg[2] === 'MSG' && typeof(msg[4]) === 'string') {
                // msgObj.offset is API guaranteed by our storage module
@@ -166,9 +176,9 @@ const computeIndex = function (data, cb) {
     });
 };
 
-const computeMetadata = function (data, cb, errorHandler) {
+const computeMetadata = function (data, cb) {
     const ref = {};
-    const lineHandler = Meta.createLineHandler(ref, errorHandler);
+    const lineHandler = Meta.createLineHandler(ref, Env.Log.error);
     return void store.readChannelMetadata(data.channel, lineHandler, function (err) {
         if (err) {
             // stream errors?
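
computeMetadata (and getPinState below) drop their third errorHandler parameter: parse errors encountered while streaming a log no longer travel back through a dedicated callback, they are simply logged, and the command dispatcher at the bottom of the file loses the matching relay plumbing. This works because the line handlers call their error function with (label, info), the same signature as the Env.Log stubs defined at the top of this file. A minimal sketch of the assumed contract (not the actual implementation of createLineHandler):

    // assumed shape: createLineHandler accumulates parsed lines into ref
    // and reports malformed ones through a (label, info) error function
    var createLineHandler = function (ref, errorFn) {
        return function (err, line) {
            if (err) {
                // Env.Log.error satisfies exactly this signature, so it can
                // be passed in directly and each failure is relayed to the
                // main process as an ordinary log entry
                return void errorFn('LINE_PARSE_ERROR', {
                    error: err.message,
                    line: line,
                });
            }
            // otherwise fold the parsed line into ref...
        };
    };
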
@@ -199,7 +209,7 @@ const getOlderHistory = function (data, cb) {
     store.getMessages(channelName, function (msgStr) {
         if (found) { return; }
 
-        let parsed = tryParse(Env, msgStr);
+        let parsed = HK.tryParse(Env, msgStr);
         if (typeof parsed === "undefined") { return; }
 
         // identify classic metadata messages by their inclusion of a channel.
@@ -221,11 +231,11 @@ const getOlderHistory = function (data, cb) {
     });
 };
 
-const getPinState = function (data, cb, errorHandler) {
+const getPinState = function (data, cb) {
     const safeKey = data.key;
 
     var ref = {};
-    var lineHandler = Pins.createLineHandler(ref, errorHandler);
+    var lineHandler = Pins.createLineHandler(ref, Env.Log.error);
 
     // if channels aren't in memory. load them from disk
     // TODO replace with readMessagesBin
@@ -328,7 +338,7 @@ const getHashOffset = function (data, cb) {
     var offset = -1;
     store.readMessagesBin(channelName, 0, (msgObj, readMore, abort) => {
         // tryParse returns a parsed message or undefined
-        const msg = tryParse(Env, msgObj.buff.toString('utf8'));
+        const msg = HK.tryParse(Env, msgObj.buff.toString('utf8'));
         // if it was undefined then go onto the next message
         if (typeof msg === "undefined") { return readMore(); }
         if (typeof(msg[4]) !== 'string' || lastKnownHash !== HK.getHash(msg[4])) {
@@ -342,6 +352,47 @@ const getHashOffset = function (data, cb) {
     });
 };
 
+const removeOwnedBlob = function (data, cb) {
+    const blobId = data.blobId;
+    const safeKey = data.safeKey;
+
+    nThen(function (w) {
+        // check if you have permissions
+        blobStore.isOwnedBy(safeKey, blobId, w(function (err, owned) {
+            if (err || !owned) {
+                w.abort();
+                return void cb("INSUFFICIENT_PERMISSIONS");
+            }
+        }));
+    }).nThen(function (w) {
+        // remove the blob
+        blobStore.archive.blob(blobId, w(function (err) {
+            Env.Log.info('ARCHIVAL_OWNED_FILE_BY_OWNER_RPC', {
+                safeKey: safeKey,
+                blobId: blobId,
+                status: err? String(err): 'SUCCESS',
+            });
+            if (err) {
+                w.abort();
+                return void cb(err);
+            }
+        }));
+    }).nThen(function () {
+        // archive the proof
+        blobStore.archive.proof(safeKey, blobId, function (err) {
+            Env.Log.info("ARCHIVAL_PROOF_REMOVAL_BY_OWNER_RPC", {
+                safeKey: safeKey,
+                blobId: blobId,
+                status: err? String(err): 'SUCCESS',
+            });
+            if (err) {
+                return void cb("E_PROOF_REMOVAL");
+            }
+            cb(void 0, 'OK');
+        });
+    });
+};
+
 const COMMANDS = {
     COMPUTE_INDEX: computeIndex,
     COMPUTE_METADATA: computeMetadata,
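
removeOwnedBlob is the half of the commit that moves blob deletion into the worker: it verifies ownership, archives the blob, then archives the ownership proof, logging each outcome through the forwarded Env.Log. Once registered in COMMANDS below, the main process can delete a blob the same way it issues any other worker command. A hypothetical caller on the RPC side (sendCommand and its location are assumptions, not shown in this diff):

    // hypothetical main-process caller, e.g. inside an owned-file RPC handler
    sendCommand({
        command: 'REMOVE_OWNED_BLOB',
        blobId: blobId,   // the blob to archive
        safeKey: safeKey, // the requesting user's key, for the ownership check
    }, function (err, value) {
        if (err) { return void cb(err); } // e.g. "INSUFFICIENT_PERMISSIONS"
        cb(void 0, value); // 'OK'
    });
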
@@ -352,12 +403,14 @@ const COMMANDS = {
     GET_DELETED_PADS: getDeletedPads,
     GET_MULTIPLE_FILE_SIZE: getMultipleFileSize,
     GET_HASH_OFFSET: getHashOffset,
+    REMOVE_OWNED_BLOB: removeOwnedBlob,
 };
 
 process.on('message', function (data) {
-    if (!data || !data.txid) {
+    if (!data || !data.txid || !data.pid) {
         return void process.send({
-            error:'E_INVAL'
+            error:'E_INVAL',
+            data: data,
         });
     }
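
Incoming messages must now carry a pid in addition to a txid, and an invalid message is echoed back in the error response to make debugging easier. The txid lets the parent match a response to the callback that issued the command; the pid presumably identifies the requesting process so that stale or misrouted responses can be discarded. A minimal sketch of the sending side this guard implies, with all names assumed:

    // sketch of the assumed parent-side bookkeeping
    var PID = process.pid; // presumably identifies the requesting process
    var pending = {};      // txid -> response callback

    var sendCommand = function (worker, msg, cb) {
        var txid = Math.random().toString(16).slice(2); // unique per request
        pending[txid] = cb;
        msg.txid = txid;
        msg.pid = PID;
        worker.send(msg);
    };

    worker.on('message', function (res) {
        if (!res || !res.txid) { return; }
        var cb = pending[res.txid];
        if (!cb) { return; } // unknown or already-answered transaction
        delete pending[res.txid];
        cb(res.error, res.value);
    });
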
@@ -365,6 +418,7 @@ process.on('message', function (data) {
     process.send({
         error: err,
         txid: data.txid,
+        pid: data.pid,
         value: value,
     });
 };
@@ -381,12 +435,12 @@ process.on('message', function (data) {
     if (typeof(command) !== 'function') {
         return void cb("E_BAD_COMMAND");
     }
-    command(data, cb, function (label, info) {
-        // for streaming errors
-        process.send({
-            error: label,
-            value: info,
-        });
-    });
+    command(data, cb);
 });
 
+process.on('uncaughtException', function (err) {
+    console.error('[%s] UNCAUGHT EXCEPTION IN DB WORKER');
+    console.error(err);
+    console.error("TERMINATING");
+    process.exit(1);
+});
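
Finally, a worker that hits an uncaught exception now logs it and terminates rather than limping on with unknown state. (Note that the '[%s]' placeholder in the first log line has no corresponding argument, so it prints literally; presumably a timestamp or similar was intended.) For the service to survive such a crash, the main process presumably replaces dead workers; a sketch of that pattern, entirely an assumption about the parent rather than anything shown in this diff:

    // hypothetical supervision in the main process
    var Fork = require('child_process').fork;
    var launchWorker = function (onMessage) {
        var worker = Fork('lib/workers/db-worker');
        worker.on('message', onMessage);
        worker.on('exit', function () {
            // replace the dead worker so the pool keeps its size
            launchWorker(onMessage);
        });
        return worker;
    };
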