// Copyright 2016-2017, Pulumi Corporation. All rights reserved.
import * as aws from "@pulumi/aws";
import * as other from "./other/index";
// Validate that 'require'd packages are captured correctly.
function getContentType() {
let mime = require('mime-types');
return mime.contentType(".js");
}
const testFunc = new aws.serverless.Function("f", {
policies: [aws.iam.AWSLambdaFullAccess],
includePaths: ['./Pulumi.yaml'],
}, async (ev, ctx, cb) => {
var aws = await import('aws-sdk');
var express = await import('express');
var os = require('os');
var slack = require('@slack/client');
var answer = other.answer;
console.log(answer);
getContentType();
});
exports.functionARN = testFunc.lambda.arn;
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const config = new pulumi.Config();
const createSg = config.getBoolean("createSg") || false;
// Accept the AWS region as input.
const awsRegion = config.get("awsRegion") || "us-west-2";
const inUsEast1 = (awsRegion === "us-east-1");
// Optionally create a security group and attach some rules.
let defaultSecurityGroup: aws.ec2.SecurityGroup | undefined;
if (createSg) {
defaultSecurityGroup = new aws.ec2.SecurityGroup("default", {
description: "Default security group",
});
}
// SSH access from anywhere
let ingress: aws.ec2.SecurityGroupRule | undefined;
if (createSg) {
ingress = new aws.ec2.SecurityGroupRule("ingress", {
cidrBlocks: ["0.0.0.0/0"],
fromPort: 22,
protocol: "tcp",
securityGroupId: defaultSecurityGroup!.id,
toPort: 22,
type: "ingress",
});
}
// outbound internet access
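// The original snippet is truncated after the comment above; below is a minimal sketch of a
// matching egress rule, mirroring the ingress rule pattern. Allowing all outbound traffic is
// an assumption, as is the rule name "egress".
let egress: aws.ec2.SecurityGroupRule | undefined;
if (createSg) {
egress = new aws.ec2.SecurityGroupRule("egress", {
cidrBlocks: ["0.0.0.0/0"],
fromPort: 0,
protocol: "-1",
securityGroupId: defaultSecurityGroup!.id,
toPort: 0,
type: "egress",
});
}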
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
// Originally defined at main.tf:5
const ubuntu = pulumi.output(aws.getAmi({
filters: [
{
name: "name",
values: ["ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-amd64-server-*"],
},
{
name: "virtualization-type",
values: ["hvm"],
},
],
mostRecent: true,
owners: ["099720109477"],
}, { async: true }));
// Originally defined at main.tf:21
const web = new aws.ec2.Instance("web", {
ami: ubuntu.id,
instanceType: "t2.micro", // the instance type is not shown in this excerpt; "t2.micro" is an assumption
});
//
// Note that the VPC has been tagged appropriately.
const defaultVpc = new aws.ec2.Vpc("default", {
cidrBlock: "10.0.0.0/16", // Just one CIDR block
enableDnsHostnames: true, // Definitely want DNS hostnames.
// The tag collection for this VPC.
tags: {
// Ensure that we tag this VPC with a Name.
Name: "test",
},
});
// Use some data sources.
const defaultSubnetIds = defaultVpc.id.apply(id => aws.ec2.getSubnetIds({
vpcId: id,
}, { async: true }));
const defaultAvailabilityZones = pulumi.output(aws.getAvailabilityZones({ async: true }));
// Look up the details of each availability zone by ID. The lookups are performed inside apply()
// so that they only run once the list of zone IDs has resolved.
const defaultAvailabilityZone = defaultAvailabilityZones.apply(azs =>
Promise.all(azs.zoneIds.map(zoneId => aws.getAvailabilityZone({ zoneId }, { async: true }))));
// The VPC details
const vpc = [{
// The ID
id: defaultVpc.id,
}];
// The region, again
const region = awsRegion; // why not
// Create a security group.
//
// This group should allow SSH and HTTP access.
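// The snippet ends before the group described above is created; a minimal sketch follows,
// assuming the group belongs to defaultVpc and that SSH (22) and HTTP (80) should be open to
// the world. The resource name "web-sg" is hypothetical.
const webSecurityGroup = new aws.ec2.SecurityGroup("web-sg", {
vpcId: defaultVpc.id,
description: "Allow SSH and HTTP access",
ingress: [
{ protocol: "tcp", fromPort: 22, toPort: 22, cidrBlocks: ["0.0.0.0/0"] },
{ protocol: "tcp", fromPort: 80, toPort: 80, cidrBlocks: ["0.0.0.0/0"] },
],
egress: [
{ protocol: "-1", fromPort: 0, toPort: 0, cidrBlocks: ["0.0.0.0/0"] },
],
});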
let globalInfrastructureResource: InfrastructureResource | undefined;
export function getGlobalInfrastructureResource(): pulumi.Resource {
if (!globalInfrastructureResource) {
globalInfrastructureResource = new InfrastructureResource();
}
return globalInfrastructureResource;
}
// Whether or not we should run Lambda-based compute in the private network
export let runLambdaInVPC: boolean = config.usePrivateNetwork;
// The IAM Role Policies to apply to compute for both Lambda and ECS
const defaultComputePolicies = [
aws.iam.AWSLambdaFullAccess, // Provides wide access to "serverless" services (Dynamo, S3, etc.)
aws.iam.AmazonEC2ContainerServiceFullAccess, // Required for lambda compute to be able to run Tasks
];
let computePolicies: aws.ARN[] = config.computeIAMRolePolicyARNs
? config.computeIAMRolePolicyARNs.split(",")
: defaultComputePolicies;
let computePoliciesAccessed = false;
// Set the IAM policies to use for compute.
export function setComputeIAMRolePolicies(policyARNs: string[]) {
if (computePoliciesAccessed) {
throw new RunError(
"The compute policies have already been used, make sure you are setting IAM policies early enough.");
}
computePolicies = policyARNs;
}
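// Example usage (the policy ARN below is only an illustration): override the defaults near the
// top of the program, before any compute resources read the policies; once they have been read,
// setComputeIAMRolePolicies throws a RunError.
// setComputeIAMRolePolicies(["arn:aws:iam::aws:policy/ReadOnlyAccess"]);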
// Copyright 2016-2019, Pulumi Corporation. All rights reserved.
import * as aws from "@pulumi/aws";
import * as pulumi from "@pulumi/pulumi";
// Create a bucket each for TPS reports and their archived zips.
const tpsReports = new aws.s3.Bucket("tpsReports");
const tpsZips = new aws.s3.Bucket("tpsZips");
// Anytime a new TPS Report is uploaded, archive it in a zipfile.
tpsReports.onObjectCreated("zipTpsReports", async (e) => {
const admZip = require("adm-zip");
const s3 = new aws.sdk.S3();
for (const rec of e.Records || []) {
const zip = new admZip();
const [ buck, key ] = [ rec.s3.bucket.name, rec.s3.object.key ];
console.log(`Zipping ${buck}/${key} into ${tpsZips.bucket.get()}/${key}.zip`);
const data = await s3.getObject({ Bucket: buck, Key: key }).promise();
zip.addFile(key, data.Body);
await s3.putObject({
Bucket: tpsZips.bucket.get(),
Key: `${key}.zip`,
Body: zip.toBuffer(),
}).promise();
}
});

// Tail of a separate log-collector example; its beginning is not shown in this listing. A
// minimal reconstructed header follows (the function name and policies are assumptions) so
// that the try/catch handler below parses as written.
const collector = new aws.serverless.Function(name, {
policies: [aws.iam.AWSLambdaFullAccess], // assumed; the original options are not shown
}, async (ev, ctx, cb) => {
try {
const zlib = await import("zlib");
const payload = Buffer.from(ev.awslogs.data, "base64");
const result = zlib.gunzipSync(payload);
console.log(result.toString("utf8"));
cb(null, {});
} catch (err) {
cb(err);
}
},
{ parent: this },
);
this.lambda = collector.lambda;
// Although Lambda will create this on-demand, we create the log group explicitly so that we can delete it when
// the stack gets torn down.
const logGroup = new aws.cloudwatch.LogGroup(name, {
name: this.lambda.name.apply(n => "/aws/lambda/" + n),
}, { parent: this });
const region = aws.config.requireRegion();
const permission = new aws.lambda.Permission(name, {
action: "lambda:invokeFunction",
function: this.lambda,
principal: "logs." + region + ".amazonaws.com",
}, { parent: this });
}
}
// Tail of another example: a scheduled storage-cleanup Lambda whose beginning is not shown. A
// minimal reconstructed header follows (the CallbackFunction form and name are assumptions) so
// that the callback body below parses as written.
const cleanupStorageLambdaFunction = new aws.lambda.CallbackFunction(`${name}-storageCleanup`, {
callback: async () => {
const clean = GraphqlWsOverWebSocketOverHttpStorageCleaner({
connectionStorage: webSocketOverHttpStorage.connections,
subscriptionStorage: webSocketOverHttpStorage.subscriptions,
})
await clean()
console.log("End storage cleanup lambda callback")
return
}
})
/** EventRule that produces events on a regular interval that will trigger the cleanupStorageLambdaFunction */
const storageCleanupSchedulerEventRule = new aws.cloudwatch.EventRule(`${name}-storageCleanupScheduler`, {
description: "Every 1 minute",
// https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html
scheduleExpression: "rate(1 minute)",
});
const storageCleanupEventSubscription = new aws.cloudwatch.EventRuleEventSubscription(
`${name}-storageCleanup-es`,
storageCleanupSchedulerEventRule,
cleanupStorageLambdaFunction,
{},
)
// This excerpt assumes `asset` (from "@pulumi/pulumi") and an IAM `role` are defined earlier in the file.
let lambda = new aws.lambda.Function("mylambda", {
code: new asset.AssetArchive({
"index.js": new asset.StringAsset(
"exports.handler = (e, c, cb) => cb(null, {statusCode: 200, body: 'Hello, world!'});",
),
}),
role: role.arn,
handler: "index.handler",
runtime: aws.lambda.NodeJS6d10Runtime,
});
///////////////////
// Logging
///////////////////
let logGroup = new aws.cloudwatch.LogGroup("/aws/lambda/mylambda", {
retentionInDays: 7,
});
let logcollector = new aws.lambda.Function("mylambda-logcollector", {
code: new asset.AssetArchive({
"index.js": new asset.StringAsset(
"exports.handler = (e, c, cb) => console.log(e);",
),
}),
role: role.arn,
handler: "index.handler",
runtime: aws.lambda.NodeJS6d10Runtime,
});
let permission = new aws.lambda.Permission("logcollector-permission", {
action: "lambda:InvokeFunction",