constructor(scope: cdk.Construct, id: string, props: BucketDeploymentProps) {
  super(scope, id);

  if (props.distributionPaths && !props.distribution) {
    throw new Error("Distribution must be specified if distribution paths are specified");
  }

  const sourceHash = calcSourceHash(handlerSourceDirectory);

  // tslint:disable-next-line: no-console
  console.error({sourceHash});

  // Singleton Lambda that performs the actual copy into the destination bucket
  const handler = new lambda.SingletonFunction(this, 'CustomResourceHandler', {
    uuid: this.renderSingletonUuid(props.memoryLimit),
    code: lambda.Code.fromAsset(handlerCodeBundle, { sourceHash }),
    runtime: lambda.Runtime.PYTHON_3_6,
    handler: 'index.handler',
    lambdaPurpose: 'Custom::CDKBucketDeployment',
    timeout: cdk.Duration.minutes(15),
    role: props.role,
    memorySize: props.memoryLimit
  });

  const sources: SourceConfig[] = props.sources.map((source: ISource) => source.bind(this));
  sources.forEach(source => source.bucket.grantRead(handler));
  props.destinationBucket.grantReadWrite(handler);

  if (props.distribution) {
    // Allow the handler to invalidate the distribution after the upload completes
    handler.addToRolePolicy(new iam.PolicyStatement({
      effect: iam.Effect.ALLOW,
      actions: ['cloudfront:GetInvalidation', 'cloudfront:CreateInvalidation'],
      resources: ['*'],
    }));
  }
}
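For context, a minimal usage sketch of the construct above (not from the original source): it assumes an existing bucket named websiteBucket and a CloudFront distribution defined elsewhere in the stack, with the module imported as s3deploy; the construct id and asset path are illustrative.

import * as s3deploy from '@aws-cdk/aws-s3-deployment';

// Deploy local files and invalidate the CloudFront cache once the copy finishes
new s3deploy.BucketDeployment(this, 'DeployWithInvalidation', {
  sources: [s3deploy.Source.asset('./website-dist')],
  destinationBucket: websiteBucket,   // assumed existing s3.Bucket
  distribution,                       // enables the CloudFront grants shown above
  distributionPaths: ['/*'],          // paths to invalidate after the upload
});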
constructor(parent, id, props) {
  super(parent, id, props);

  // Create a lambda that recrawls changelogs discovered in the past
  const recrawlLambda = new lambda.Function(this, 'recrawl', {
    runtime: lambda.Runtime.NODEJS_10_X,
    handler: 'recrawl.handle',
    code: lambda.Code.asset('./app/recrawl'),
    timeout: cdk.Duration.minutes(5),
    environment: {
      CHANGELOGS_TABLE_NAME: props.changelogsTable.tableName,
      DISCOVERED_TOPIC_NAME: props.toCrawlTopic.topicArn
    }
  });

  // Grant the lambda permission to modify the tables
  props.changelogsTable.grantReadWriteData(recrawlLambda.role);
  props.toCrawlTopic.grantPublish(recrawlLambda.role);

  // Schedule the recrawler to run once every minute
  this.eventRule = new events.Rule(this, 'recrawl-check-schedule', {
    schedule: events.Schedule.rate(cdk.Duration.minutes(1))
  });
}
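The excerpt stops before the rule is given a target. Presumably the original constructor continues by pointing the schedule at the Lambda above; a minimal sketch of that wiring (the import alias is an assumption):

import * as targets from '@aws-cdk/aws-events-targets';

// Hypothetical continuation of the constructor above: invoke the recrawl Lambda on each tick
this.eventRule.addTarget(new targets.LambdaFunction(recrawlLambda));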

// CloudTrail partitioner stack: alarm that fires when the partitioner reports errors.
// The metric, threshold, and evaluation period are assumptions added so this fragment
// parses; the remaining properties are from the original excerpt.
const error_alarm = new cloudwatch.Alarm(this, 'error_alarm', {
  metric: new cloudwatch.Metric({
    namespace: 'cloudtrail_partitioner',  // assumed namespace
    metricName: 'errors',                 // assumed metric name
    statistic: 'Sum'
  }),
  threshold: 1,                           // assumed: alarm on one or more errors
  evaluationPeriods: 1,
  datapointsToAlarm: 1,
  treatMissingData: cloudwatch.TreatMissingData.NOT_BREACHING,
  alarmDescription: "Detect errors",
  alarmName: "cloudtrail_partitioner_errors"
});

// Create SNS for alarms to be sent to
const sns_topic = new sns.Topic(this, 'cloudtrail_partitioner_alarm', {
  displayName: 'cloudtrail_partitioner_alarm'
});

// Connect the alarm to the SNS
error_alarm.addAlarmAction(new cloudwatch_actions.SnsAction(sns_topic));

// Create Lambda to forward alarms
const alarm_forwarder = new lambda.Function(this, "alarm_forwarder", {
  runtime: lambda.Runtime.PYTHON_3_7,
  code: lambda.Code.asset("resources/alarm_forwarder"),
  handler: "main.handler",
  description: "Forwards alarms from the local SNS to another",
  logRetention: logs.RetentionDays.TWO_WEEKS,
  timeout: cdk.Duration.seconds(30),
  memorySize: 128,
  environment: {
    "ALARM_SNS": config['alarm_sns_arn']
  },
});

// Add permission to publish the events so the alarms can be forwarded
alarm_forwarder.addToRolePolicy(new iam.PolicyStatement({
  resources: [config['alarm_sns_arn']],
  actions: ['sns:Publish']
}));

constructor(parent, id, props) {
  super(parent, id, props);

  // Create a lambda that returns autocomplete results
  const recentlyCrawled = new lambda.Function(this, 'recently-crawled', {
    runtime: lambda.Runtime.NODEJS_10_X,
    handler: 'recently-crawled.handle',
    code: lambda.Code.asset('./app/recently-crawled'),
    environment: {
      FEEDS_TABLE_NAME: props.feedsTable.tableName,
      API_BUCKET_NAME: props.apiBucket.bucketName
    }
  });

  // Grant the lambda permission to modify the tables and S3 bucket
  props.feedsTable.grantReadWriteData(recentlyCrawled.role);
  props.apiBucket.grantReadWrite(recentlyCrawled.role);

  // Schedule the recrawler to run once every minute
  this.eventRule = new events.Rule(this, 'recrawl-check-schedule', {
    schedule: events.Schedule.rate(cdk.Duration.minutes(1))
  });
}

// Next.js deployment stack: upload the build output and static assets to S3.
// The opening of this first BucketDeployment was cut from the excerpt; the
// construct id below is an assumption.
new s3Upload.BucketDeployment(stack, `${id}-s3-next-static-files`, {
  sources: [s3Upload.Source.asset("./.next/static")],
  destinationBucket: bucket,
  destinationKeyPrefix: "_next/static"
});

new s3Upload.BucketDeployment(stack, `${id}-s3-public-files`, {
  sources: [s3Upload.Source.asset("./public")],
  destinationBucket: bucket,
  destinationKeyPrefix: "public"
});

new s3Upload.BucketDeployment(stack, `${id}-s3-static-files`, {
  sources: [s3Upload.Source.asset("./static")],
  destinationBucket: bucket,
  destinationKeyPrefix: "static"
});

const ssrLambda = new lambda.Function(stack, `${id}-ssr-lambda`, {
  code: new lambda.AssetCode(`./${LAMBDA_BUILD_DIR}`),
  handler: "index.handler",
  runtime: lambda.Runtime.NODEJS_8_10,
  environment: {}
});

// const cloudFrontOrigins = [
//   {
//     url: bucket.bucketWebsiteUrl,
//     private: true,
//     pathPatterns: {
//       "_next/*": {
//         ttl: 86400
//       },
//       "static/*": {
//         ttl: 86400
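How the SSR Lambda above is exposed is not shown in this excerpt; the commented-out block only sketches CloudFront origins for the deployed assets. One plausible wiring, purely an assumption, is a Lambda-backed REST API via @aws-cdk/aws-apigateway:

import * as apigateway from '@aws-cdk/aws-apigateway';

// Hypothetical: proxy every request path to the SSR Lambda
const ssrApi = new apigateway.LambdaRestApi(stack, `${id}-ssr-api`, {
  handler: ssrLambda
});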

constructor(parent, id, props) {
  super(parent, id, props);

  // Create a lambda that does the crawl
  const crawlLambda = new lambda.Function(this, 'crawl-lambda', {
    runtime: lambda.Runtime.NODEJS_10_X,
    handler: 'crawl.handle',
    code: lambda.Code.asset('./app/crawl'),
    timeout: cdk.Duration.seconds(30),
    vpc: props.vpc,
    environment: {
      // githubSecrets is resolved elsewhere in the original stack
      GITHUB_CLIENT_ID: githubSecrets.clientId,
      GITHUB_SECRET: githubSecrets.secret,
      CHANGELOGS_TABLE_NAME: props.changelogsTable.tableName,
      FEEDS_TABLE_NAME: props.feedsTable.tableName,
      SEARCH_INDEX_TABLE_NAME: props.searchIndexTable.tableName,
      API_BUCKET_NAME: props.apiBucket.bucketName,
      WEB_BUCKET_NAME: props.webBucket.bucketName,
      REDIS_HOST: props.redis.cluster.attrRedisEndpointAddress,
      REDIS_PORT: props.redis.cluster.attrRedisEndpointPort
    }
  });
}
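githubSecrets is not defined in this excerpt. One way the credentials could be resolved, shown here purely as an assumption, is from AWS Secrets Manager (githubSecretArn is a hypothetical parameter):

import * as secretsmanager from '@aws-cdk/aws-secretsmanager';

// Hypothetical lookup of GitHub credentials stored as a JSON secret
const githubSecret = secretsmanager.Secret.fromSecretArn(this, 'github-secret', githubSecretArn);
const githubSecrets = {
  clientId: githubSecret.secretValueFromJson('clientId').toString(),
  secret: githubSecret.secretValueFromJson('secret').toString()
};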

constructor(scope: cdk.Construct, id: string, props: InstanceDrainHookProps) {
  super(scope, id);

  const drainTime = props.drainTime || cdk.Duration.minutes(5);

  // Invoke Lambda via SNS Topic
  const fn = new lambda.Function(this, 'Function', {
    code: lambda.Code.fromInline(fs.readFileSync(path.join(__dirname, 'lambda-source', 'index.py'), { encoding: 'utf-8' })),
    handler: 'index.lambda_handler',
    runtime: lambda.Runtime.PYTHON_3_6,
    // Timeout: some extra margin for additional API calls made by the Lambda,
    // up to a maximum of 15 minutes.
    timeout: cdk.Duration.seconds(Math.min(drainTime.toSeconds() + 10, 900)),
    environment: {
      CLUSTER: props.cluster.clusterName
    }
  });

  // Hook everything up: ASG -> Topic, Topic -> Lambda
  props.autoScalingGroup.addLifecycleHook('DrainHook', {
    lifecycleTransition: autoscaling.LifecycleTransition.INSTANCE_TERMINATING,
    defaultResult: autoscaling.DefaultResult.CONTINUE,
    notificationTarget: new hooks.FunctionHook(fn),
  });
}

private getOrCreateHelmChartHandler(cluster: Cluster): lambda.IFunction | undefined {
  if (!cluster.kubectlEnabled) {
    return undefined;
  }

  let handler = cluster.node.tryFindChild('HelmChartHandler') as lambda.IFunction;
  if (!handler) {
    handler = new lambda.Function(cluster, 'HelmChartHandler', {
      code: lambda.Code.fromAsset(path.join(__dirname, 'helm-chart')),
      runtime: lambda.Runtime.PYTHON_3_7,
      handler: 'index.handler',
      timeout: Duration.minutes(15),
      layers: [ KubectlLayer.getOrCreate(this, { version: "2.0.0-beta1" }) ],
      memorySize: 256,
      environment: {
        CLUSTER_NAME: cluster.clusterName,
      },
      // NOTE: we must use the default IAM role that's mapped to "system:masters"
      // as the execution role of this custom resource handler. This is the only
      // way to be able to interact with the cluster after it's been created.
      role: cluster._defaultMastersRole,
    });
  }
  return handler;
}