// Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
// Generates an EMS ingest/archive/delete report for [startTime, endTime] by
// scrolling Elasticsearch granule records, writing EMS-formatted lines to a
// temporary file, and uploading that file to S3.
// NOTE(review): this span is not syntactically valid as shown — an unrelated
// fragment (apparently from a granule ingest() method) is spliced in between
// the search-query setup and the scroll loop. Code left byte-identical;
// comments only. Recover the real body from upstream before building.
async function generateReport(reportType, startTime, endTime) {
log.debug(`ems-ingest-report.generateReport ${reportType} startTime: ${startTime} endTime: ${endTime}`);
// reject unsupported report types early
if (!Object.keys(emsMappings).includes(reportType)) {
throw new Error(`ems-ingest-report.generateReport report type not supported: ${reportType}`);
}
// create a temporary file for the report
const name = buildReportFileName(reportType, startTime);
const filename = path.join(os.tmpdir(), name);
const stream = fs.createWriteStream(filename);
// retrieve granule/deletedgranule records in batches, and generate EMS records for each batch
const esClient = await Search.es();
// 'delete' reports read from the deletedgranule ES type; all others from granule
const type = (reportType !== 'delete') ? 'granule' : 'deletedgranule';
const esIndex = process.env.ES_INDEX || defaultIndexAlias;
const searchQuery = buildSearchQuery(esIndex, type, startTime, endTime);
// NOTE(review): the `} else {` below has no matching `if` in this view — the
// lines from here through the closing brace before `let granules` belong to a
// different function (a granule ingest method referencing `this.collection`).
} else {
// Collection is passed in, but granule does not define the dataType and version
if (!dataType) dataType = this.collection.dataType || this.collection.name;
if (!version) version = this.collection.version;
}
// make sure there is a url_path
this.collection.url_path = this.collection.url_path || '';
this.collectionId = constructCollectionId(dataType, version);
const downloadFiles = granule.files
.filter((f) => this.filterChecksumFiles(f))
.map((f) => this.ingestFile(f, bucket, this.duplicateHandling));
log.debug('awaiting all download.Files');
const files = flatten(await Promise.all(downloadFiles));
log.debug('finished ingest()');
return {
granuleId: granule.granuleId,
dataType: dataType,
version: version,
files
};
}
// NOTE(review): the EMS-report logic resumes here; `response` is referenced
// but the initial search call that produces it is not visible in this view.
let granules = response.hits.hits.map((s) => s._source);
let numRetrieved = granules.length;
stream.write(buildEMSRecords(emsMappings[reportType], granules).join('\n'));
// keep scrolling until every hit reported by ES has been retrieved
while (response.hits.total !== numRetrieved) {
response = await esClient.scroll({ // eslint-disable-line no-await-in-loop
scrollId: response._scroll_id,
scroll: '30s'
});
granules = response.hits.hits.map((s) => s._source);
stream.write('\n');
stream.write(buildEMSRecords(emsMappings[reportType], granules).join('\n'));
numRetrieved += granules.length;
}
stream.end();
// NOTE(review): `$(unknown)` below looks like a garbled `${filename}`
// interpolation — confirm against the upstream source before changing it.
log.debug(`EMS ${reportType} generated with ${numRetrieved} records: $(unknown)`);
// upload to s3
const reportKey = await determineReportKey(
reportType, startTime, bucketsPrefixes().reportsPrefix
);
const s3Uri = await uploadReportToS3(filename, process.env.system_bucket, reportKey);
return { reportType, file: s3Uri };
}
/**
 * Moves a granule file from `source` to `target` in S3, preserving any
 * pre-existing target object whose content differs.
 *
 * If the existing target's checksum matches the source's, the source object is
 * deleted (the target already holds the content). Otherwise the existing
 * target is renamed with a timestamp suffix and the source is moved into
 * place, so both versions remain part of the granule.
 *
 * @param {Object} source - `{ Bucket, Key }` of the file being moved
 * @param {Object} target - `{ Bucket, Key }` of the destination
 * @param {Object} sourceChecksumObject - `{ checksumType, checksum }` for the
 *   source file; when `checksum` is absent a CKSUM is computed on the fly
 * @param {Object} copyOptions - options forwarded to `exports.moveGranuleFile`
 * @returns {Promise} resolves to the renamed file(s) at the target key
 *   (via `exports.getRenamedS3File`)
 */
async function moveGranuleFileWithVersioning(source, target, sourceChecksumObject, copyOptions) {
  const { checksumType, checksum } = sourceChecksumObject;
  // The two checksum computations are independent, so run them in parallel;
  // the source-side computation is skipped entirely when the caller already
  // supplied a checksum. (The original awaited them sequentially.)
  const [targetFileSum, sourceFileSum] = await Promise.all([
    aws.calculateS3ObjectChecksum(
      { algorithm: (checksumType || 'CKSUM'), bucket: target.Bucket, key: target.Key }
    ),
    checksum || aws.calculateS3ObjectChecksum(
      { algorithm: 'CKSUM', bucket: source.Bucket, key: source.Key }
    )
  ]);
  // if the checksum of the existing file is the same as the new one, keep the existing file,
  // else rename the existing file, and both files are part of the granule.
  if (targetFileSum === sourceFileSum) {
    await aws.deleteS3Object(source.Bucket, source.Key);
  } else {
    log.debug(`Renaming ${target.Key}...`);
    await exports.renameS3FileWithTimestamp(target.Bucket, target.Key);
    await exports.moveGranuleFile(
      { Bucket: source.Bucket, Key: source.Key },
      { Bucket: target.Bucket, Key: target.Key },
      copyOptions
    );
  }
  // return renamed files
  return exports.getRenamedS3File(target.Bucket, target.Key);
}
/**
 * Throws when the step is configured to force-fail, with optional
 * "pass on retry" behavior: when `passOnRetry` is set, a marker object in S3
 * records that a failure already happened, so the next (retry) invocation
 * detects the marker, deletes it, and passes instead of throwing again.
 *
 * @param {Object} event - step event; reads `event.config.execution`,
 *   `event.config.bucket`, `event.config.passOnRetry`, `event.config.fail`
 * @returns {Promise<void>}
 * @throws {Error} when `config.fail` is set and this is not a detected retry
 */
async function throwErrorIfConfigured(event) {
  const { bucket, passOnRetry, fail } = event.config;
  const retryFilename = `${event.config.execution}_retry.txt`;

  // A marker object for this execution signals that we already failed once.
  const isRetry = passOnRetry
    ? await s3ObjectExists({ Bucket: bucket, Key: retryFilename })
    : false;

  if (isRetry) {
    log.debug('Detected retry');
    // Delete file
    await deleteS3Object(bucket, retryFilename);
    return;
  }

  if (fail) {
    // Leave the marker so the subsequent retry can detect it and pass.
    if (passOnRetry) {
      await s3PutObject({
        Bucket: bucket,
        Key: retryFilename,
        Body: ''
      });
    }
    throw new Error('Step configured to force fail');
  }
}
// Generates the EMS 'metadata' report for collections active within
// [startTime, endTime] and uploads it to the EMS reports bucket.
// NOTE(review): this function is truncated — the `putObject` argument object
// is cut off below and unrelated code follows; recover the remainder (Key,
// Body, and the closing of the call) from upstream. Comments only.
async function generateReport(startTime, endTime) {
log.debug(`ems-metadata-report.generateReport startTime: ${startTime} endTime: ${endTime}`);
const reportType = 'metadata';
const emsCollections = await getCollectionsForEms(startTime, endTime);
// one '|&|'-delimited line per collection EMS record
const report = emsCollections
.map((collection) => Object.values(collection.emsRecord).join('|&|'))
.join('\n');
const { reportsBucket, reportsPrefix } = bucketsPrefixes();
const reportKey = await determineReportKey(reportType, startTime, reportsPrefix);
const s3Uri = buildS3Uri(reportsBucket, reportKey);
log.info(`Uploading report to ${s3Uri}`);
return s3().putObject({
Bucket: reportsBucket,
// NOTE(review): fragment of a moveGranuleFiles-style implementation — it
// begins mid-scope (`moveFileParams` and `processedFiles` are defined outside
// this view) and is cut off right after parsing the file's S3 URI.
// Comments only; code left byte-identical.
const moveFileRequests = moveFileParams.map((moveFileParam) => {
const { source, target, file } = moveFileParam;
// files with a move target are moved, then recorded in processedFiles
if (target) {
log.debug('moveGranuleFiles', source, target);
return moveGranuleFile(source, target).then(() => {
processedFiles.push({
bucket: target.Bucket,
key: target.Key,
name: file.name || file.fileName
});
});
}
// no target: work out the file's current bucket/key so it can be recorded
// unchanged — prefer explicit bucket/key, fall back to parsing `filename`
let fileBucket;
let fileKey;
if (file.bucket && file.key) {
fileBucket = file.bucket;
fileKey = file.key;
} else if (file.filename) {
const parsed = aws.parseS3Uri(file.filename);
// Resolves CMR credentials: a Launchpad token when `cmr_oauth_provider` is
// 'launchpad', otherwise a username-based credential whose password is read
// from Secrets Manager.
// NOTE(review): truncated — the final return object is cut off below
// (presumably a `password` field from the fetched secret plus the closing
// braces); recover the remainder from upstream. Comments only.
async function getCreds() {
if (process.env.cmr_oauth_provider === 'launchpad') {
const config = {
api: process.env.launchpad_api,
certificate: process.env.launchpad_certificate,
passphrase: process.env.launchpad_passphrase
};
log.debug('cmrjs.getCreds getLaunchpadToken');
const token = await getLaunchpadToken(config);
return {
provider: process.env.cmr_provider,
clientId: process.env.cmr_client_id,
token
};
}
// non-Launchpad path: fetch the CMR password secret
const secret = await aws.secretsManager().getSecretValue({
SecretId: process.env.cmr_password_secret_name
}).promise();
return {
provider: process.env.cmr_provider,
clientId: process.env.cmr_client_id,
username: process.env.cmr_username,
/**
 * Deletes expired EMS metadata reports from both the pending and the sent
 * report prefixes of the system bucket, honoring the configured retention
 * period (`ems_retentionInDays`).
 *
 * @returns {Promise<Array>} resolves when both prefixes have been purged
 */
async function cleanup() {
  log.debug('ems-metadata-report cleanup old reports');
  const { reportsPrefix, reportsSentPrefix } = bucketsPrefixes();
  // purge one prefix: list expired objects, then delete them
  const purge = (prefix) =>
    getExpiredS3Objects(
      process.env.system_bucket, prefix, process.env.ems_retentionInDays
    ).then((expiredObjects) => deleteS3Files(expiredObjects));
  return Promise.all([reportsPrefix, reportsSentPrefix].map(purge));
}
// NOTE(review): dangling promise-chain fragment — the call producing this
// chain (apparently an SNS publish) and the body of the catch handler are not
// visible in this view. Comments only.
.then((res) => {
log.debug('sns result:', res);
return res;
})
.catch((snsError) => {