eventHandler: async (event) => {
    const route = event.pathParameters!["route"];
    console.log(`Getting count for '${route}'`);

    const client = new aws.sdk.DynamoDB.DocumentClient();

    // Get the previous value and increment it, referencing the outer
    // `counterTable` object captured by the handler.
    const tableData = await client.get({
        TableName: counterTable.name.get(),
        Key: { id: route },
        ConsistentRead: true,
    }).promise();

    const value = tableData.Item;
    let count = (value && value.count) || 0;

    await client.put({
        TableName: counterTable.name.get(),
        Item: { id: route, count: ++count },
    }).promise();

    // Respond with the updated count.
    return {
        statusCode: 200,
        body: JSON.stringify({ route, count }),
    };
},
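
// A plausible surrounding program for the counter handler above (an assumption;
// it is not part of the snippet): a DynamoDB table created with @pulumi/aws whose
// name the handler reads via `counterTable.name.get()`. The handler itself would
// typically be attached as the `eventHandler` of an API Gateway route.
import * as aws from "@pulumi/aws";

const counterTable = new aws.dynamodb.Table("counterTable", {
    attributes: [{ name: "id", type: "S" }],
    hashKey: "id",
    readCapacity: 5,
    writeCapacity: 5,
});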

return async function run(params: RunTaskRequest) {
    const ecs = new aws.sdk.ECS();

    const cluster = params.cluster;
    const clusterArn = cluster.id.get();
    const securityGroupIds = cluster.securityGroups.map(g => g.id.get());
    const publicSubnetIds = await cluster.vpc.publicSubnetIds;
    const subnetIds = publicSubnetIds.map(i => i.get());
    const assignPublicIp = isFargate; // && !usePrivateSubnets;

    // Run the task in the cluster's VPC subnets and security groups.
    return ecs.runTask({
        taskDefinition: taskDefArn.get(),
        launchType: isFargate ? "FARGATE" : "EC2",
        networkConfiguration: {
            awsvpcConfiguration: {
                assignPublicIp: assignPublicIp ? "ENABLED" : "DISABLED",
                securityGroups: securityGroupIds,
                subnets: subnetIds,
            },
        },
        cluster: clusterArn,
    }).promise();
};

return function run(params: RunTaskRequest) {
    const ecs = new aws.sdk.ECS();

    const cluster = params.cluster;
    const clusterArn = cluster.id.get();
    const securityGroupIds = cluster.securityGroups.map(g => g.id.get());
    const subnetIds = cluster.vpc.publicSubnetIds.map(i => i.get());
    const assignPublicIp = isFargate; // && !usePrivateSubnets;

    // Run the task in the cluster's VPC subnets and security groups.
    return ecs.runTask({
        taskDefinition: taskDefArn.get(),
        launchType: isFargate ? "FARGATE" : "EC2",
        networkConfiguration: {
            awsvpcConfiguration: {
                assignPublicIp: assignPublicIp ? "ENABLED" : "DISABLED",
                securityGroups: securityGroupIds,
                subnets: subnetIds,
            },
        },
        cluster: clusterArn,
    }).promise();
};

this.run = async function (options: TaskRunOptions = {}) {
    const ecs = new aws.sdk.ECS();

    const innerContainers = containersOutput.get();
    const containerName = options.containerName || Object.keys(innerContainers)[0];
    if (!containerName) {
        throw new Error("No valid container name found to run task for.");
    }
    const container = innerContainers[containerName];

    // Extract the environment values from the options.
    const env: { name: string, value: string }[] = [];
    addEnvironmentVariables(container.environment);
    addEnvironmentVariables(options && options.environment);

    const assignPublicIp = isFargate && !cluster.network.usePrivateSubnets;
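
// A plausible shape for the `addEnvironmentVariables` helper called above (an
// assumption; the real helper is not shown in this excerpt). It would be declared
// alongside `env` so it can close over it, copying any defined key/value pairs
// from a string map into the `{ name, value }` list that ECS expects.
function addEnvironmentVariables(vars: Record<string, string> | undefined) {
    if (!vars) {
        return;
    }
    for (const name of Object.keys(vars)) {
        const value = vars[name];
        if (value) {
            env.push({ name, value });
        }
    }
}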

tpsReports.onObjectCreated("zipTpsReports", async (e) => {
    const admZip = require("adm-zip");
    const s3 = new aws.sdk.S3();

    for (const rec of e.Records || []) {
        const zip = new admZip();
        const [ buck, key ] = [ rec.s3.bucket.name, rec.s3.object.key ];
        console.log(`Zipping ${buck}/${key} into ${tpsZips.bucket.get()}/${key}.zip`);

        const data = await s3.getObject({ Bucket: buck, Key: key }).promise();
        zip.addFile(key, data.Body);

        await s3.putObject({
            Bucket: tpsZips.bucket.get(),
            Key: `${key}.zip`,
            Body: zip.toBuffer(),
        }).promise();
    }
});
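
// A plausible setup for the two bucket objects the handler above references (an
// assumption; they are not part of the snippet): two S3 buckets created with
// @pulumi/aws. `onObjectCreated` attaches the Lambda above to create events on
// the source bucket, and zipped copies are written to the second bucket.
import * as aws from "@pulumi/aws";

const tpsReports = new aws.s3.Bucket("tpsreports"); // raw reports land here
const tpsZips = new aws.s3.Bucket("tpszips");       // zipped copies are written here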

                    isVerified: s.user.verified,
                    isRetweet: s.retweeted_status != null,
                    url: `https://twitter.com/${user}/status/${s.id_str}`,
                });
            });

            return resolve(results);
        });
    });

    console.log(`Got ${tweets.length} tweets from Twitter for query ${twitterQuery}`);

    const filename = `${outputFolder}/${Date.now()}`;
    const contents = Buffer.from(tweets.join("\n"), "utf8");

    const s3 = new aws.sdk.S3();
    await s3.putObject({
        Bucket: bucket.id.get(),
        Key: filename,
        Body: contents,
    }).promise();
});
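
// Plausible declarations for the values the handler above closes over (an
// assumption; the names and query are illustrative): the S3 bucket the raw
// tweets are written to, the Twitter search query, and the key prefix used
// for each uploaded object.
import * as aws from "@pulumi/aws";

const bucket = new aws.s3.Bucket("tweet-bucket");
const twitterQuery = "#serverless";
const outputFolder = "tweets";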

const emptyTrash: aws.cloudwatch.EventRuleEventHandler = async (
    event: aws.cloudwatch.EventRuleEvent,
) => {
    const s3Client = new aws.sdk.S3();
    const bucket = trashBucket.id.get();

    // List everything currently in the trash bucket...
    const { Contents = [] } = await s3Client
        .listObjects({ Bucket: bucket })
        .promise();
    const objects: ObjectIdentifier[] = Contents.map(object => {
        return { Key: object.Key! };
    });

    // ...and delete it all in a single batch call.
    await s3Client
        .deleteObjects({
            Bucket: bucket,
            Delete: { Objects: objects, Quiet: false },
        })
        .promise();
};
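
// One way to run the handler above on a schedule (an assumption; the rule name
// and rate are illustrative). `aws.cloudwatch.onSchedule` creates the CloudWatch
// event rule and subscribes the Lambda to it; `trashBucket` would be an
// `aws.s3.Bucket` declared elsewhere in the program.
const emptyTrashSchedule = aws.cloudwatch.onSchedule("emptyTrash", "rate(1 hour)", emptyTrash);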