app.get("/v1/:username", async (req, res, next) => {
try {
const { username } = req.params;
const { format } = req.query;
const key = `${username}-${format}`;
const cached = cache.get(key);
if (cached !== null) {
return res.json(cached);
}
const data = await fetch(username, format);
cache.put(key, data, 1000 * 3600); // Store for an hour
res.json(data);
} catch (err) {
next(new VError(err, alerts.error.profileDisabled));
}
});
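// Hedged sketch (not part of the original source): an Express error-handling middleware
// that the next(new VError(...)) call above would eventually reach. It assumes the same
// `app` and `VError` objects as the route above and is illustrative only, not the
// project's actual handler.
app.use((err, req, res, next) => {
  // VError.fullStack() prints the whole cause chain, including the wrapped fetch error.
  console.error(VError.fullStack(err));
  // Only the outer message (e.g. alerts.error.profileDisabled) and any structured info
  // attached via VError's `info` option are returned to the client.
  res.status(500).json({ error: err.message, info: VError.info(err) });
});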
socket.on("message", async function(message, remote) {
// await lastMessageProcess;
// log.info(remote.address + ':' + remote.port +' - ' + message);
// await Promise.all(outstandingInserts); // wait for all prior inserts to occur
let parsed;
try {
parsed = parse(message);
} catch (err) {
let wrapped = new verror.WError(
err,
"Error parsing from: " +
remote.address +
":" +
remote.port +
" , message: " +
message
);
throw wrapped;
}
if (insertrawevents) {
let raw = r
.table("rawevents")
.insert({
host: os.hostname(),
import VError = require("verror");
import { VError as VError2, MultiError, SError, WError } from "verror";
const error = new Error("foo");
const verror1 = new VError(error, "bar");
const verror2 = new VError2(error, "bar");
const serror = new SError(error, "bar");
const multiError = new MultiError([verror1, verror2]);
const werror = new WError(verror1, "foobar");
const verror3 = new VError({
  name: "fooError",
  cause: error,
  info: {
    "info0": "baz"
  }
}, "bar");
const verror4 = new VError({ cause: null }, "bar");
const cause1: Error | undefined = verror1.cause();
const cause2: Error | undefined = werror.cause();
const info: { [k: string]: any } = VError.info(verror3);
const namedCause: Error | null = VError.findCauseByName(verror3, "fooError");
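// Illustrative additions (not part of the original typings exercise) showing what the
// values above contain: VError appends the cause message, WError suppresses it, and
// info()/findCauseByName()/errors() walk the wrapped chain.
console.log(verror1.message);            // "bar: foo" - the cause message is appended
console.log(werror.message);             // "foobar"   - WError hides the cause message
console.log(info);                       // { info0: "baz" } - merged from verror3's chain
console.log(namedCause === verror3);     // true - verror3 itself carries the name "fooError"
console.log(multiError.errors().length); // 2 - the underlying [verror1, verror2] list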
res.on('end', function () {
    var body = chunks.join('');
    var data;
    try {
        data = JSON.parse(body);
    } catch (syntaxErr) {
        callback(new WError(syntaxErr,
            'invalid image data in response: \'%s\'',
            body));
        return;
    }
    callback(null, data, res);
});
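// Hedged sketch (not from the original source) of a caller of the callback above.
// `fetchImageManifest` is a hypothetical name for the surrounding function, and it is
// assumed the verror module is also in scope as VError. The point: WError keeps its
// cause available to VError.findCauseByName() even though the cause message is left
// out of err.message.
fetchImageManifest(function (err, data) {
    if (err) {
        if (VError.findCauseByName(err, 'SyntaxError') !== null) {
            console.error('server returned malformed JSON:\n' + VError.fullStack(err));
        } else {
            console.error(err.message);
        }
        return;
    }
    // ... use the parsed image data ...
});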
if (_.fullState.pgs_singleton) {
    cb(new VError('cluster is in singleton mode'));
    return;
}

/*
 * It's possible to ignore the following warnings, so we collect
 * them in one place in order to print them to the operator in a
 * later prompt.
 */
var warnings = [];
if (_.fullState.pgs_errors.length) {
    warnings.push(new VError.WError(new VError.MultiError(
        _.fullState.pgs_errors), 'cluster has errors'));
}
if (_.fullState.pgs_warnings.length) {
    warnings.push(new VError.WError(new VError.MultiError(
        _.fullState.pgs_warnings), 'cluster has warnings'));
}

/*
 * We need to check the replication LSNs here to ensure we have
 * genuine lag, as opposed to the cluster just not taking any
 * writes.
 */
var replErrs = [];
jsprim.forEachKey(_.fullState.pgs_peers, function (key, peer) {
    var lag_in_seconds = lagInSeconds(peer.pgp_lag);
    if (lag_in_seconds instanceof Error) {
        replErrs.push(new VError(lag_in_seconds, '"%s"',
            peer.pgp_label));
    }
    if (lag_in_seconds !== null && lag_in_seconds > _.lagToIgnore) {
        replErrs.push(new VError('"%s" has %ds of lag behind ' +
            'its upstream peer', peer.pgp_label, lag_in_seconds));
    }
});
if (replErrs.length) {
    warnings.push(new VError.WError(new VError.MultiError(replErrs),
        'cluster is exhibiting lag'));
}

_.clusterWarnings = warnings;
cb();
},
function promptForWarnings(_, cb) {
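/*
 * Hedged sketch (not part of the original manatee code) of how the collected
 * _.clusterWarnings might be reported to the operator in the later prompt. Each
 * warning is a WError wrapping a MultiError, so VError.cause() recovers the
 * MultiError and VError.errorForEach() flattens its individual errors.
 */
_.clusterWarnings.forEach(function (warning) {
    console.error('warning: %s', warning.message);
    var cause = VError.cause(warning);
    if (cause !== null) {
        VError.errorForEach(cause, function (e) {
            console.error('    caused by: %s', e.message);
        });
    }
});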
async function getMediaUrl(base64data) {
  try {
    const buff = dataUriToBuffer(base64data);
    const mediaResponse = await twitterClient.post("media/upload", {
      media_data: buff.toString("base64")
    });
    const tweetResponse = await twitterClient.post("statuses/update", {
      status: "canvas",
      media_ids: mediaResponse.media_id_string
    });
    return tweetResponse.entities.media[0].media_url.replace(
      "http://",
      "https://"
    );
  } catch (err) {
    throw new VError(err, alerts.error.imageUploadFailed);
  }
}
app.post("/v1/tweetMedia", (req, res, next) => {
const { image } = req.body;
if (typeof image !== "string") {
return next(new VError(alerts.error.imageInvalid));
}
getTwitterMediaUrl(image)
.then(mediaUrl =>
res.json({
mediaUrl
})
)
.catch(next);
});
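// Hedged sketch (not part of the original source) of the error-handling middleware the
// .catch(next) above would hit. In the routes above, the "invalid image" VError is
// created without a cause while the upload failure wraps the Twitter client error, so
// VError.cause() can be used to pick a status code. Purely illustrative.
app.use((err, req, res, next) => {
  const status = VError.cause(err) === null ? 400 : 502;
  res.status(status).json({ error: err.message });
});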