implement per-channel fifo queues for metadata and channel writes
This commit is contained in:
parent
41c86bb6bc
commit
4902554a61
126
historyKeeper.js
126
historyKeeper.js
@ -7,6 +7,7 @@ const Nacl = require('tweetnacl');
|
|||||||
const Crypto = require('crypto');
|
const Crypto = require('crypto');
|
||||||
const Once = require("./lib/once");
|
const Once = require("./lib/once");
|
||||||
const Meta = require("./lib/metadata");
|
const Meta = require("./lib/metadata");
|
||||||
|
const WriteQueue = require("./lib/write-queue");
|
||||||
|
|
||||||
let Log;
|
let Log;
|
||||||
const now = function () { return (new Date()).getTime(); };
|
const now = function () { return (new Date()).getTime(); };
|
||||||
@ -302,88 +303,59 @@ module.exports.create = function (cfg) {
|
|||||||
* the fix is to use callbacks and implement queueing for writes
|
* the fix is to use callbacks and implement queueing for writes
|
||||||
* to guarantee that offset computation is always atomic with writes
|
* to guarantee that offset computation is always atomic with writes
|
||||||
*/
|
*/
|
||||||
const storageQueues = {};
|
const queueStorage = WriteQueue();
|
||||||
|
|
||||||
const storeQueuedMessage = function (ctx, queue, id) {
|
|
||||||
if (queue.length === 0) {
|
|
||||||
delete storageQueues[id];
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
const first = queue.shift();
|
|
||||||
|
|
||||||
const msgBin = first.msg;
|
|
||||||
const optionalMessageHash = first.hash;
|
|
||||||
const isCp = first.isCp;
|
|
||||||
|
|
||||||
// Store the message first, and update the index only once it's stored.
|
|
||||||
// store.messageBin can be async so updating the index first may
|
|
||||||
// result in a wrong cpIndex
|
|
||||||
nThen((waitFor) => {
|
|
||||||
store.messageBin(id, msgBin, waitFor(function (err) {
|
|
||||||
if (err) {
|
|
||||||
waitFor.abort();
|
|
||||||
Log.error("HK_STORE_MESSAGE_ERROR", err.message);
|
|
||||||
|
|
||||||
// this error is critical, but there's not much we can do at the moment
|
|
||||||
// proceed with more messages, but they'll probably fail too
|
|
||||||
// at least you won't have a memory leak
|
|
||||||
|
|
||||||
// TODO make it possible to respond to clients with errors so they know
|
|
||||||
// their message wasn't stored
|
|
||||||
storeQueuedMessage(ctx, queue, id);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}));
|
|
||||||
}).nThen((waitFor) => {
|
|
||||||
getIndex(ctx, id, waitFor((err, index) => {
|
|
||||||
if (err) {
|
|
||||||
Log.warn("HK_STORE_MESSAGE_INDEX", err.stack);
|
|
||||||
// non-critical, we'll be able to get the channel index later
|
|
||||||
|
|
||||||
// proceed to the next message in the queue
|
|
||||||
storeQueuedMessage(ctx, queue, id);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (typeof (index.line) === "number") { index.line++; }
|
|
||||||
if (isCp) {
|
|
||||||
index.cpIndex = sliceCpIndex(index.cpIndex, index.line || 0);
|
|
||||||
for (let k in index.offsetByHash) {
|
|
||||||
if (index.offsetByHash[k] < index.cpIndex[0]) {
|
|
||||||
delete index.offsetByHash[k];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
index.cpIndex.push(({
|
|
||||||
offset: index.size,
|
|
||||||
line: ((index.line || 0) + 1)
|
|
||||||
} /*:cp_index_item*/));
|
|
||||||
}
|
|
||||||
if (optionalMessageHash) { index.offsetByHash[optionalMessageHash] = index.size; }
|
|
||||||
index.size += msgBin.length;
|
|
||||||
|
|
||||||
// handle the next element in the queue
|
|
||||||
storeQueuedMessage(ctx, queue, id);
|
|
||||||
}));
|
|
||||||
});
|
|
||||||
};
|
|
||||||
|
|
||||||
/*  storeMessage(ctx, channel, msg, isCp, optionalMessageHash)

    Append one message to a channel's log on disk and then update the
    channel's cached index (size, line count, checkpoint index, offsets
    by hash).

    All work for a given channel id is funneled through a per-channel
    FIFO (queueStorage) so that offset computation is always atomic
    with the write that produced it — concurrent writes to the same
    channel can otherwise compute indexes from stale sizes.

    ctx                  - historyKeeper context (passed through to getIndex)
    channel              - channel object; only channel.id is read here
    msg                  - the message payload (string, newline appended)
    isCp                 - whether this message is a checkpoint
    optionalMessageHash  - if provided, record this hash's byte offset
*/
const storeMessage = function (ctx, channel, msg, isCp, optionalMessageHash) {
    const id = channel.id;

    // `new Buffer(...)` is deprecated; Buffer.from is the supported API
    const msgBin = Buffer.from(msg + '\n', 'utf8');

    queueStorage(id, function (next) {
        // Store the message first, and update the index only once it's stored.
        // store.messageBin can be async so updating the index first may
        // result in a wrong cpIndex
        nThen((waitFor) => {
            store.messageBin(id, msgBin, waitFor(function (err) {
                if (err) {
                    waitFor.abort();
                    Log.error("HK_STORE_MESSAGE_ERROR", err.message);

                    // this error is critical, but there's not much we can do at the moment
                    // proceed with more messages, but they'll probably fail too
                    // at least you won't have a memory leak

                    // TODO make it possible to respond to clients with errors so they know
                    // their message wasn't stored
                    return void next();
                }
            }));
        }).nThen((waitFor) => {
            getIndex(ctx, id, waitFor((err, index) => {
                if (err) {
                    Log.warn("HK_STORE_MESSAGE_INDEX", err.stack);
                    // non-critical, we'll be able to get the channel index later
                    return void next();
                }

                // the stored message occupies one more line in the log
                if (typeof (index.line) === "number") { index.line++; }

                if (isCp) {
                    // drop checkpoints that have fallen out of the retained window
                    index.cpIndex = sliceCpIndex(index.cpIndex, index.line || 0);
                    // forget offsets that now point before the oldest checkpoint
                    for (let k in index.offsetByHash) {
                        if (index.offsetByHash[k] < index.cpIndex[0]) {
                            delete index.offsetByHash[k];
                        }
                    }
                    // record the new checkpoint's position
                    index.cpIndex.push(({
                        offset: index.size,
                        line: ((index.line || 0) + 1)
                    } /*:cp_index_item*/));
                }

                // remember where this message starts so clients can resume from its hash
                if (optionalMessageHash) { index.offsetByHash[optionalMessageHash] = index.size; }
                index.size += msgBin.length;

                // handle the next element in the queue
                next();
            }));
        });
    });
};
|
||||||
|
|
||||||
var CHECKPOINT_PATTERN = /^cp\|(([A-Za-z0-9+\/=]+)\|)?/;
|
var CHECKPOINT_PATTERN = /^cp\|(([A-Za-z0-9+\/=]+)\|)?/;
|
||||||
|
|||||||
40
lib/write-queue.js
Normal file
40
lib/write-queue.js
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
/*  lib/write-queue.js

    Serialize asynchronous tasks into independent FIFO queues, one per id.
    Tasks queued under the same id run strictly one at a time, in order;
    tasks queued under different ids do not block each other.

    var q = WriteQueue();
    q(id, function (next) {
        // whatever you need to do....

        // when you're done
        next();
    });
*/

// Partially apply f to x, producing a zero-argument thunk.
var fix1 = function (f, x) {
    return function () { f(x); };
};

var WriteQueue = function () {
    // id => array of pending tasks; a missing key means that id is idle
    var map = {};

    // Advance the queue for `id`: run its next task, or tear the queue down.
    var next = function (id) {
        var queue = map[id];
        // defensive: nothing is queued under this id. The original fell
        // through to queue.shift() here and would throw a TypeError.
        if (!queue) { return; }
        // the last task finished and nothing else was queued: clean up
        if (queue.length === 0) { return void delete map[id]; }
        var task = queue.shift();
        // each task receives a `next` callback which advances this id's queue
        task(fix1(next, id));
    };

    return function (id, task) {
        // support initialization with just a function
        if (typeof(id) === 'function' && typeof(task) === 'undefined') {
            task = id;
            id = '';
        }
        // ...but you really need to pass a function
        if (typeof(task) !== 'function') { throw new Error("Expected function"); }

        // if the intended queue already has tasks in progress, add this one to the end of the queue
        if (map[id]) { return void map[id].push(task); }

        // otherwise create a queue containing the given task and start it
        map[id] = [task];
        next(id);
    };
};

// guard keeps the module loadable outside CommonJS; under Node this is
// identical to `module.exports = ...`
if (typeof(module) !== 'undefined') { module.exports = WriteQueue; }
|
||||||
37
rpc.js
37
rpc.js
@ -18,7 +18,7 @@ const nThen = require("nthen");
|
|||||||
const getFolderSize = require("get-folder-size");
|
const getFolderSize = require("get-folder-size");
|
||||||
const Pins = require("./lib/pins");
|
const Pins = require("./lib/pins");
|
||||||
const Meta = require("./lib/metadata");
|
const Meta = require("./lib/metadata");
|
||||||
|
const WriteQueue = require("./lib/write-queue");
|
||||||
|
|
||||||
var RPC = module.exports;
|
var RPC = module.exports;
|
||||||
|
|
||||||
@ -340,8 +340,7 @@ var getMetadata = function (Env, channel, cb) {
|
|||||||
value: value
|
value: value
|
||||||
}
|
}
|
||||||
*/
|
*/
|
||||||
// XXX global saferphore may cause issues here, a queue "per channel" is probably better
|
var queueMetadata = WriteQueue();
|
||||||
var metadataSem = Saferphore.create(1);
|
|
||||||
var setMetadata = function (Env, data, unsafeKey, cb) {
|
var setMetadata = function (Env, data, unsafeKey, cb) {
|
||||||
var channel = data.channel;
|
var channel = data.channel;
|
||||||
var command = data.command;
|
var command = data.command;
|
||||||
@ -349,16 +348,15 @@ var setMetadata = function (Env, data, unsafeKey, cb) {
|
|||||||
if (!command || typeof (command) !== 'string') { return void cb ('INVALID_COMMAND'); }
|
if (!command || typeof (command) !== 'string') { return void cb ('INVALID_COMMAND'); }
|
||||||
if (Meta.commands.indexOf(command) === -1) { return void('UNSUPPORTED_COMMAND'); }
|
if (Meta.commands.indexOf(command) === -1) { return void('UNSUPPORTED_COMMAND'); }
|
||||||
|
|
||||||
metadataSem.take(function (give) {
|
queueMetadata(channel, function (next) {
|
||||||
var g = give();
|
|
||||||
getMetadata(Env, channel, function (err, metadata) {
|
getMetadata(Env, channel, function (err, metadata) {
|
||||||
if (err) {
|
if (err) {
|
||||||
g();
|
cb(err);
|
||||||
return void cb(err);
|
return void next();
|
||||||
}
|
}
|
||||||
if (!(metadata && Array.isArray(metadata.owners))) {
|
if (!(metadata && Array.isArray(metadata.owners))) {
|
||||||
g();
|
cb('E_NO_OWNERS');
|
||||||
return void cb('E_NO_OWNERS');
|
return void next();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Confirm that the channel is owned by the user in question
|
// Confirm that the channel is owned by the user in question
|
||||||
@ -372,13 +370,13 @@ var setMetadata = function (Env, data, unsafeKey, cb) {
|
|||||||
|| !Array.isArray(data.value)
|
|| !Array.isArray(data.value)
|
||||||
|| data.value.length !== 1
|
|| data.value.length !== 1
|
||||||
|| data.value[0] !== unsafeKey) {
|
|| data.value[0] !== unsafeKey) {
|
||||||
g();
|
cb('INSUFFICIENT_PERMISSIONS');
|
||||||
return void cb('INSUFFICIENT_PERMISSIONS');
|
return void next();
|
||||||
}
|
}
|
||||||
|
|
||||||
} else if (metadata.owners.indexOf(unsafeKey) === -1) {
|
} else if (metadata.owners.indexOf(unsafeKey) === -1) {
|
||||||
g();
|
cb('INSUFFICIENT_PERMISSIONS');
|
||||||
return void cb('INSUFFICIENT_PERMISSIONS');
|
return void next();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add the new metadata line
|
// Add the new metadata line
|
||||||
@ -387,22 +385,23 @@ var setMetadata = function (Env, data, unsafeKey, cb) {
|
|||||||
try {
|
try {
|
||||||
changed = Meta.handleCommand(metadata, line);
|
changed = Meta.handleCommand(metadata, line);
|
||||||
} catch (e) {
|
} catch (e) {
|
||||||
g();
|
cb(e);
|
||||||
return void cb(e);
|
return void next();
|
||||||
}
|
}
|
||||||
|
|
||||||
// if your command is valid but it didn't result in any change to the metadata,
|
// if your command is valid but it didn't result in any change to the metadata,
|
||||||
// call back now and don't write any "useless" line to the log
|
// call back now and don't write any "useless" line to the log
|
||||||
if (!changed) {
|
if (!changed) {
|
||||||
g();
|
cb(void 0, metadata);
|
||||||
return void cb(void 0, metadata);
|
return void next();
|
||||||
}
|
}
|
||||||
Env.msgStore.writeMetadata(channel, JSON.stringify(line), function (e) {
|
Env.msgStore.writeMetadata(channel, JSON.stringify(line), function (e) {
|
||||||
g();
|
|
||||||
if (e) {
|
if (e) {
|
||||||
return void cb(e);
|
cb(e);
|
||||||
|
return void next();
|
||||||
}
|
}
|
||||||
cb(void 0, metadata);
|
cb(void 0, metadata);
|
||||||
|
next();
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user