// (scraper boilerplate, not part of the program) Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
// see https://stackoverflow.com/questions/41814750/how-to-know-event-souce-of-lambda-function-in-itself
// for other event sources
// Modify to suit your usecase
log.warn("Unexpected invocation");
log.warn(event);
return;
}
if (!srcKey.endsWith('doc') && !srcKey.endsWith('docx') ) {
log.warn('Unsupported file type ' + srcKey);
return; // TODO: if step function, return error?
}
if (srcKey.endsWith("/")) {
// assume this is a folder; event probably triggered by copy/pasting a folder
log.debug("is folder; returning");
return;
}
// Output input URL for ease of inspection
log.info("https://s3.console.aws.amazon.com/s3/object/" + srcBucket + "/" + srcKey);
// Compute mimeType
var mimeType;
if (outputAs==Format.DOCX) {
mimeType = Format.DOC.toString();
} else if (outputAs==Format.PDF) {
mimeType = Format.PDF.toString();
} else {
log.error("Unsupported output format " + outputAs);
return;
mimeType = Format.DOC.toString();
} else if (outputAs==Format.PDF) {
mimeType = Format.PDF.toString();
} else {
log.error("Unsupported output format " + outputAs);
return;
}
// initialise engine.
// This is inside the handler since we need to read memoryLimitInMB from context
if (!INITIALISED) {
try {
config.init(context.memoryLimitInMB);
INITIALISED = true;
} catch (e) {
log.error(e);
return;
}
}
var sqsQueueUrl = process.env.SQS_WRITE_QUEUE_URL;
// Actually execute the steps
var data;
try {
// get the docx
data = await s3.getObject( {Bucket: srcBucket, Key: srcKey}).promise();
// convert it
var output = await helper.convert(srcKey, data.Body, outputAs );
// save the result
log.debug("uploading to s3 " + dstBucket);
srcBucket = body.source_bucket;
srcKey = body.source_key;
dstBucket = body.target_bucket;
dstKey = body.target_key;
correlationId = event.Records[0].messageAttributes.correlationId;
// log.debug(correlationId);
} else {
// see https://stackoverflow.com/questions/41814750/how-to-know-event-souce-of-lambda-function-in-itself
// for other event sources
// Modify to suit your usecase
log.warn("Unexpected invocation");
log.warn(event);
return;
}
if (!srcKey.endsWith('doc') && !srcKey.endsWith('docx') ) {
log.warn('Unsupported file type ' + srcKey);
return; // TODO: if step function, return error?
}
if (srcKey.endsWith("/")) {
// assume this is a folder; event probably triggered by copy/pasting a folder
log.debug("is folder; returning");
return;
}
// Output input URL for ease of inspection
log.info("https://s3.console.aws.amazon.com/s3/object/" + srcBucket + "/" + srcKey);
srcKey = body.source_key;
dstBucket = body.target_bucket;
dstKey = body.target_key;
correlationId = event.Records[0].messageAttributes.correlationId;
// log.debug(correlationId);
} else {
// see https://stackoverflow.com/questions/41814750/how-to-know-event-souce-of-lambda-function-in-itself
// for other event sources
// Modify to suit your usecase
log.warn("Unexpected invocation");
log.warn(event);
return;
}
if (!srcKey.endsWith('doc') && !srcKey.endsWith('docx') ) {
log.warn('Unsupported file type ' + srcKey);
return; // TODO: if step function, return error?
}
if (srcKey.endsWith("/")) {
// assume this is a folder; event probably triggered by copy/pasting a folder
log.debug("is folder; returning");
return;
}
// Output input URL for ease of inspection
log.info("https://s3.console.aws.amazon.com/s3/object/" + srcBucket + "/" + srcKey);
correlationId = event.Records[0].messageAttributes.correlationId;
// log.debug(correlationId);
} else {
// see https://stackoverflow.com/questions/41814750/how-to-know-event-souce-of-lambda-function-in-itself
// for other event sources
// Modify to suit your usecase
log.warn("Unexpected invocation");
log.warn(event);
return;
}
if (!srcKey.endsWith('doc') && !srcKey.endsWith('docx') ) {
log.warn('Unsupported file type ' + srcKey);
return; // TODO: if step function, return error?
}
if (srcKey.endsWith("/")) {
// assume this is a folder; event probably triggered by copy/pasting a folder
log.debug("is folder; returning");
return;
}
// Output input URL for ease of inspection
log.info("https://s3.console.aws.amazon.com/s3/object/" + srcBucket + "/" + srcKey);
// Compute mimeType
var mimeType;
if (outputAs==Format.DOCX) {
mimeType = Format.DOC.toString();
throw e;
}
if (sqsQueueUrl) {
// TODO: write SQS message on failure?
}
/* For S3 trigger, broken documents saved to dstBucket/BROKEN
To get help, please note the contents of the assertion,
together with the document which caused it.
*/
// save broken documents to dstBucket/BROKEN
/* unless */
if (dstBucket == srcBucket) /* to avoid repetitively processing the same document */ {
log.error("RESULT: Failed " + srcKey);
log.debug("cowardly refusing to write broken document to srcBucket!");
return;
}
var ext = srcKey.substr(srcKey.lastIndexOf('.') + 1);
var mimeType;
if (ext=="docx") {
mimeType = "application/vnd.openxmlformats-officedocument.wordprocessingml.document";
} else if (ext=="doc") {
mimeType = Format.DOC.toString();
} else {
mimeType = "application/octet-stream";
}
dstKey = "BROKEN/" + srcKey + "-" + (new Date).getTime() + "." + ext;
log.error("RESULT: Failed " + dstKey ); /* Log analysis regex matching */
// save this bug doc
"SenderId": "123456789012",
"ApproximateFirstReceiveTimestamp": "1523232000001"
},
"messageAttributes": { "correlationId": "foo123"},
"md5OfBody": "7b270e59b47ff90a553787216d55d91d",
"eventSource": "aws:sqs",
"eventSourceARN": "arn:aws:sqs:us-west-2:123456789012:MyQueue",
"awsRegion": "us-west-2"
}
]
}
*
*/
const { body } = event.Records[0];
log.debug(body);
srcBucket = body.source_bucket;
srcKey = body.source_key;
dstBucket = body.target_bucket;
dstKey = body.target_key;
correlationId = event.Records[0].messageAttributes.correlationId;
// log.debug(correlationId);
} else {
// see https://stackoverflow.com/questions/41814750/how-to-know-event-souce-of-lambda-function-in-itself
// for other event sources
// Modify to suit your usecase
return;
}
}
var sqsQueueUrl = process.env.SQS_WRITE_QUEUE_URL;
// Actually execute the steps
var data;
try {
// get the docx
data = await s3.getObject( {Bucket: srcBucket, Key: srcKey}).promise();
// convert it
var output = await helper.convert(srcKey, data.Body, outputAs );
// save the result
log.debug("uploading to s3 " + dstBucket);
await s3.putObject({
Bucket: dstBucket,
Key: dstKey,
Body: new Buffer(output) /* arrayBuffer to Buffer */,
ContentType: mimeType
}).promise();
log.info('RESULT: Success ' + dstKey); /* Log analysis regex matching */
//log.info(sqsQueueUrl);
if (sqsQueueUrl) {
// send SQS message
//log.info("write to sqs");
// Create an SQS service object
var sqs = new AWS.SQS({apiVersion: '2012-11-05'});
mimeType = "application/octet-stream";
}
dstKey = "BROKEN/" + srcKey + "-" + (new Date).getTime() + "." + ext;
log.error("RESULT: Failed " + dstKey ); /* Log analysis regex matching */
// save this bug doc
try {
await s3.putObject({
Bucket: dstBucket,
Key: dstKey,
Body: new Buffer(data.Body) /* arrayBuffer to Buffer */,
ContentType: mimeType
}).promise();
} catch (putErr) {
log.error(putErr);
log.error("Problem saving bug doc " + dstKey );
}
}
};
// Return a result (step function can catch this)
throw e;
}
if (sqsQueueUrl) {
// TODO: write SQS message on failure?
}
/* For S3 trigger, broken documents saved to dstBucket/BROKEN
To get help, please note the contents of the assertion,
together with the document which caused it.
*/
// save broken documents to dstBucket/BROKEN
/* unless */
if (dstBucket == srcBucket) /* to avoid repetitively processing the same document */ {
log.error("RESULT: Failed " + srcKey);
log.debug("cowardly refusing to write broken document to srcBucket!");
return;
}
var ext = srcKey.substr(srcKey.lastIndexOf('.') + 1);
var mimeType;
if (ext=="docx") {
mimeType = "application/vnd.openxmlformats-officedocument.wordprocessingml.document";
} else if (ext=="doc") {
mimeType = Format.DOC.toString();
} else {
mimeType = "application/octet-stream";
}
dstKey = "BROKEN/" + srcKey + "-" + (new Date).getTime() + "." + ext;
log.error("RESULT: Failed " + dstKey ); /* Log analysis regex matching */