// Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
// Copyright 2016-2019, Pulumi Corporation. All rights reserved.
import * as aws from "@pulumi/aws";
import * as pulumi from "@pulumi/pulumi";
// Create a bucket each for TPS reports and their archived zips.
const tpsReports = new aws.s3.Bucket("tpsReports");
const tpsZips = new aws.s3.Bucket("tpsZips");
// Anytime a new TPS Report is uploaded, archive it in a zipfile.
// NOTE(review): this handler appears truncated — neither the for-loop nor the
// onObjectCreated callback is closed before unrelated code begins below.
tpsReports.onObjectCreated("zipTpsReports", async (e) => {
// Required inside the callback so the module resolves in the Lambda runtime.
const admZip = require("adm-zip");
const s3 = new aws.sdk.S3();
// e.Records may be undefined on some invocations; default to an empty list.
for (const rec of e.Records || []) {
const zip = new admZip();
// Source bucket and key of the object that triggered this event record.
const [ buck, key ] = [ rec.s3.bucket.name, rec.s3.object.key ];
console.log(`Zipping ${buck}/${key} into ${tpsZips.bucket.get()}/${key}.zip`);
// Download the uploaded report, wrap it in a zip, and upload the archive
// into the zips bucket under the same key with a ".zip" suffix.
const data = await s3.getObject({ Bucket: buck, Key: key }).promise();
zip.addFile(key, data.Body);
await s3.putObject({
Bucket: tpsZips.bucket.get(),
Key: `${key}.zip`,
Body: zip.toBuffer(),
}).promise();
// NOTE(review): the lines below are the tail of a bucket-policy-building
// function whose opening lines are missing from this file — `serviceAccount`,
// `bucketName`, `logsPrefix`, and `accountId` are presumably its parameters;
// confirm against the full source.
{
Effect: "Allow",
// Grant the ELB service account permission to write access logs.
Principal: { AWS: serviceAccount },
Action: "s3:PutObject",
Resource: `arn:aws:s3:::${bucketName}/${logsPrefix}/AWSLogs/${accountId}/*`,
},
],
};
// aws.s3.BucketPolicy expects the policy document as a JSON string.
return JSON.stringify(policy);
}
// Key prefix under which the load balancer writes its access logs.
const logsPrefix = "alb";
// accessLogsBucket will store request logs for the load balancer.
const accessLogsBucket = new aws.s3.Bucket("alb-access-logs");
// The regional ELB service account that must be granted s3:PutObject.
let serviceAccount = aws.elasticloadbalancing.getServiceAccount();
// Resolve the bucket id, service account, and account id, then render the
// policy JSON. NOTE(review): `awsAccountId` and `accessLogsBucketPolicyString`
// are not defined in this file as shown — confirm they exist in the full source.
let policy = pulumi
.all([accessLogsBucket.id, serviceAccount, awsAccountId])
.apply(([accessLogsBucketId, serviceAccountResult, accountId]) => {
return accessLogsBucketPolicyString(accessLogsBucketId, serviceAccountResult.arn, logsPrefix, accountId);
});
// Attach the rendered policy to the access-logs bucket.
let accessLogsBucketPolicy = new aws.s3.BucketPolicy(
"accessLogsBucketPolicy",
{
bucket: accessLogsBucket.id,
policy: policy,
});
// NOTE(review): this declaration is truncated — the constructor's argument
// list is never closed before an unrelated fragment begins below.
export let alb = new aws.elasticloadbalancingv2.LoadBalancer(
// NOTE(review): constructor of a "pulumi:examples:S3Folder" component
// resource — the enclosing class header is not visible in this file, and the
// constructor body is never closed before the next fragment begins.
constructor(bucketName, path, opts) {
super("pulumi:examples:S3Folder", bucketName, {}, opts); // Register this component with name pulumi:examples:S3Folder
// Create a bucket and expose a website index document
let siteBucket = new aws.s3.Bucket(bucketName, {
website: {
indexDocument: "index.html",
},
}, { parent: this }); // specify resource parent
// For each file in the directory, create an S3 object stored in `siteBucket`
for (let item of require("fs").readdirSync(path)) {
let filePath = require("path").join(path, item);
let object = new aws.s3.BucketObject(item, {
bucket: siteBucket, // reference the s3.Bucket object
source: new pulumi.asset.FileAsset(filePath), // use FileAsset to point to a file
contentType: mime.getType(filePath) || undefined, // set the MIME type of the file
}, { parent: this }); // specify resource parent
}
// Set the access policy for the bucket so all objects are readable
let bucketPolicy = new aws.s3.BucketPolicy("bucketPolicy", {
bucket: siteBucket.bucket,
policy: siteBucket.bucket.apply(this.publicReadPolicyForBucket),
}, { parent: this }); // specify resource parent
// Expose the bucket name and website URL as component outputs.
this.bucketName = siteBucket.bucket;
this.websiteUrl = siteBucket.websiteEndpoint;
// Register output properties for this component
// NOTE(review): constructor fragment of a file-bucket component — the
// enclosing class and the end of this body are missing from this file.
constructor(bucketName: string, opts: FileBucketOpts) {
this.bucket = new aws.s3.Bucket(bucketName);
this.fileContents = {};
this.files = {};
// Upload every listed file as a bucket object, keeping its contents
// cached in `fileContents` keyed by file path.
for (const file of opts.files) {
this.fileContents[file] = fs.readFileSync(file).toString();
this.files[file] = new aws.s3.BucketObject(file, {
bucket: this.bucket,
source: new pulumi.asset.FileAsset(file),
contentType: mime.getType(file) || undefined,
});
}
// Optionally attach a bucket policy when the caller supplies one.
if (opts.policy !== undefined) {
// Set the access policy for the bucket so all objects are readable
this.policy = new aws.s3.BucketPolicy(`bucketPolicy`, {
bucket: this.bucket.bucket,
// policy: this.bucket.bucket.apply(publicReadPolicyForBucket)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import * as aws from "@pulumi/aws";
import * as serverless from "@pulumi/aws-serverless";
// A bucket whose objects are encrypted at rest with AES256 by default.
const bucket = new aws.s3.Bucket("testbucket", {
serverSideEncryptionConfiguration: {
rule: {
applyServerSideEncryptionByDefault: {
sseAlgorithm: "AES256",
},
},
},
// Allow the bucket to be destroyed even when it still contains objects.
forceDestroy: true,
});
const sqsQueue = new aws.sqs.Queue("queue", {
visibilityTimeoutSeconds: 300,
});
// NOTE(review): this subscription handler is truncated — the async callback
// is never closed before unrelated code begins below.
serverless.queue.subscribe("subscription", sqsQueue, async (event) => {
// Dynamic import so the SDK is loaded inside the handler at runtime.
const awssdk = await import("aws-sdk");
// NOTE(review): the opening of this config object literal is missing from
// this file; `stackConfig` is presumably a pulumi.Config instance — confirm
// against the full source.
pathToWebsiteContents: stackConfig.require("pathToWebsiteContents"),
// targetDomain is the domain/host to serve content at.
targetDomain: stackConfig.require("targetDomain"),
// alias is an optional domain alias the CDN will support as well.
alias: stackConfig.get("alias") || undefined,
// ACM certificate for the target domain. Must be in the us-east-1 region.
certificateArn: stackConfig.require("certificateArn"),
// redirectDomain is the domain to use for any redirects.
redirectDomain: stackConfig.get("redirectDomain") || undefined,
};
// redirectDomain is the domain to use when redirecting.
// Falls back to the target domain when no explicit redirect domain is set.
const redirectDomain = config.redirectDomain || config.targetDomain;
// contentBucket stores the static content to be served via the CDN.
const contentBucket = new aws.s3.Bucket(
"contentBucket",
{
// Name the bucket after the target domain, as S3 website hosting requires.
bucket: config.targetDomain,
acl: "public-read",
// Have S3 serve its contents as if it were a website. This is how we get the right behavior
// for routes like "foo/", which S3 will automatically translate to "foo/index.html".
website: {
indexDocument: "index.html",
errorDocument: "404.html",
},
},
{
// protect: false permits Pulumi to delete/replace this bucket.
protect: false,
});
"use strict";
const aws = require("@pulumi/aws");
const pulumi = require("@pulumi/pulumi");
const mime = require("mime");
// Create a bucket and expose a website index document
let siteBucket = new aws.s3.Bucket("s3-website-bucket", {
website: {
indexDocument: "index.html",
},
});
let siteDir = "www"; // directory for content files
// For each file in the directory, create an S3 object stored in `siteBucket`
for (let item of require("fs").readdirSync(siteDir)) {
let filePath = require("path").join(siteDir, item);
let object = new aws.s3.BucketObject(item, {
bucket: siteBucket, // reference the s3.Bucket object
source: new pulumi.asset.FileAsset(filePath), // use FileAsset to point to a file
contentType: mime.getType(filePath) || undefined, // set the MIME type of the file
});
}
// Copyright 2016-2019, Pulumi Corporation. All rights reserved.
import * as aws from "@pulumi/aws";
import * as awsx from "@pulumi/awsx";
// A simple cluster to run our tasks in.
const cluster = awsx.ecs.Cluster.getDefault();
// A bucket to store videos and thumbnails.
const bucket = new aws.s3.Bucket("bucket");
// Export the bucket name.
export const bucketName = bucket.id;
// A task which runs a containerized FFMPEG job to extract a thumbnail image.
const ffmpegThumbnailTask = new awsx.ecs.FargateTaskDefinition("ffmpegThumbTask", {
container: {
// Build the container image from the local Dockerfile directory.
image: awsx.ecs.Image.fromPath("ffmpegThumbTask", "./docker-ffmpeg-thumb"),
memoryReservation: 512,
},
});
// When a new video is uploaded, run the FFMPEG task on the video file.
// Use the time index specified in the filename (e.g. cat_00-01.mp4 uses timestamp 00:01)
// NOTE(review): this CallbackFunction argument list is truncated — it is
// never closed before unrelated code begins below.
bucket.onObjectCreated("onNewVideo", new aws.lambda.CallbackFunction("onNewVideo", {
// Specify appropriate policies so that this AWS lambda can run EC2 tasks.
// A DynamoDB table with a change stream, used below for metric alarms.
// NOTE(review): `alarmIndex` is not declared anywhere in this file as shown —
// confirm it exists in the full source.
const table = new aws.dynamodb.Table("testtable", {
attributes: [{
name: "id",
type: "S",
}],
hashKey: "id",
readCapacity: 5,
writeCapacity: 5,
streamEnabled: true,
streamViewType: "NEW_AND_OLD_IMAGES",
});
// Alarm when throttled requests exceed 120 for two evaluation periods.
const tableMetric = table.metrics.throttledRequests();
const tableAlarm = tableMetric.createAlarm("alarm" + alarmIndex++, { threshold: 120, evaluationPeriods: 2 });
const bucket = new aws.s3.Bucket("b", {
acl: "private",
tags: {
Environment: "Dev",
Name: "My bucket",
},
});
// Alarm when first-byte latency exceeds 30 seconds for two evaluation periods.
const bucketMetric = bucket.metrics.firstByteLatency({ unit: "Seconds" });
const bucketAlarm = bucketMetric.createAlarm("alarm" + alarmIndex++, { threshold: 30 , evaluationPeriods: 2 });
// Look up the most recent Ubuntu Trusty 14.04 HVM AMI.
// NOTE(review): this getAmi call is truncated — its argument list is never
// closed before unrelated code begins below.
const ubuntu = pulumi.output(aws.getAmi({
filters: [
{ name: "name", values: ["ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-amd64-server-*"] },
{ name: "virtualization-type", values: ["hvm"] },
],
mostRecent: true,
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
import * as path from "path";
import { siteDir } from "./config";
// The bucket that hosts the static website; index.html is the default document.
const websiteBucket = new aws.s3.Bucket("my-bucket", {
website: {
indexDocument: "index.html",
},
});
// Upload the site's index page as a publicly readable HTML object.
const indexPage = new aws.s3.BucketObject("index.html", {
acl: "public-read",
bucket: websiteBucket,
contentType: "text/html",
source: path.join(siteDir, "index.html"),
});
// Stack outputs: the bucket's generated name and its website endpoint URL.
export const bucketName = websiteBucket.bucket;
export const bucketEndpoint = pulumi.interpolate`http://${websiteBucket.websiteEndpoint}`;