    // since we don't know the cluster name at this point, we must grant this
    // role wildcard ('*') resource permissions
    handler.addToRolePolicy(new iam.PolicyStatement({
      actions: [ 'eks:CreateCluster', 'eks:DescribeCluster', 'eks:DeleteCluster', 'eks:UpdateClusterVersion' ],
      resources: [ '*' ]
    }));

    // the CreateCluster API allows the cluster to assume this role, so we
    // must allow the Lambda execution role to pass it
    handler.addToRolePolicy(new iam.PolicyStatement({
      actions: [ 'iam:PassRole' ],
      resources: [ props.roleArn ]
    }));

    const resource = new cfn.CustomResource(this, 'Resource', {
      resourceType: ClusterResource.RESOURCE_TYPE,
      provider: cfn.CustomResourceProvider.lambda(handler),
      properties: {
        Config: props
      }
    });

    this.ref = resource.ref;
    this.attrEndpoint = Token.asString(resource.getAtt('Endpoint'));
    this.attrArn = Token.asString(resource.getAtt('Arn'));
    this.attrCertificateAuthorityData = Token.asString(resource.getAtt('CertificateAuthorityData'));
    this.creationRole = handler.role!;
  }
}
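For reference, a consumer of this construct might surface the cluster attributes as stack outputs, since they are plain string tokens. A minimal sketch, assuming a ClusterResource instance named clusterResource and a CDK version that provides CfnOutput in @aws-cdk/core:

// Hypothetical usage of the attributes captured above.
new CfnOutput(this, 'ClusterEndpoint', { value: clusterResource.attrEndpoint });
new CfnOutput(this, 'ClusterArn', { value: clusterResource.attrArn });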
  constructor(scope: Construct, id: string, props: KubernetesResourceProps) {
    super(scope, id);

    const stack = Stack.of(this);

    // we maintain a single manifest custom resource handler for each cluster
    const handler = props.cluster._k8sResourceHandler;
    if (!handler) {
      throw new Error('Cannot define a KubernetesManifest resource on a cluster with kubectl disabled');
    }

    new cfn.CustomResource(this, 'Resource', {
      provider: cfn.CustomResourceProvider.lambda(handler),
      resourceType: KubernetesResource.RESOURCE_TYPE,
      properties: {
        // `toJsonString` enables embedding CDK tokens in the manifest and will
        // render a CloudFormation-compatible JSON string (similar to
        // StepFunctions, CloudWatch Dashboards etc).
        Manifest: stack.toJsonString(props.manifest),
      }
    });
  }
}
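Applying a manifest then amounts to instantiating the construct. A sketch, assuming `cluster` is an EKS cluster with kubectl enabled; the manifest content itself is hypothetical:

new KubernetesResource(this, 'HelloConfigMap', {
  cluster,
  manifest: [{
    apiVersion: 'v1',
    kind: 'ConfigMap',
    metadata: { name: 'hello' },
    // CDK tokens are fine here; stack.toJsonString() resolves them at deploy time
    data: { greeting: 'hello, world' },
  }],
});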
  constructor(scope: cdk.Construct, id: string, props: DemoResourceProps) {
    super(scope, id);

    const resource = new CustomResource(this, 'Resource', {
      lambdaProvider: new lambda.SingletonFunction(this, 'Singleton', {
        uuid: 'f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc',
        // This makes the demo work only when run as a top-level TypeScript
        // program, but that's fine for now
        code: lambda.Code.inline(fs.readFileSync('provider.py', { encoding: 'utf-8' })),
        handler: 'index.main',
        timeout: 300,
        runtime: lambda.Runtime.Python27,
      }),
      properties: props
    });

    this.response = resource.getAtt('Response').toString();
  }
}
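Because `properties: props` forwards the construct props verbatim to the provider, and the provider's reply is read back via `getAtt('Response')`, usage might look like the sketch below; the prop name is hypothetical and would be whatever provider.py expects:

const demo = new DemoResource(this, 'Demo', {
  message: 'hello, custom resource',  // hypothetical prop consumed by provider.py
});
// CfnOutput in current CDK releases; very old releases named this cdk.Output
new cdk.CfnOutput(this, 'DemoResponse', { value: demo.response });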
      for (const statement of props.policyStatements) {
        provider.addToRolePolicy(statement);
      }
    } else { // Derive statements from AWS SDK calls
      for (const call of [props.onCreate, props.onUpdate, props.onDelete]) {
        if (call) {
          provider.addToRolePolicy(new iam.PolicyStatement({
            actions: [awsSdkToIamAction(call.service, call.action)],
            resources: ['*']
          }));
        }
      }
    }

    const create = props.onCreate || props.onUpdate;
    this.customResource = new CustomResource(this, 'Resource', {
      resourceType: 'Custom::AWS',
      provider: CustomResourceProvider.lambda(provider),
      properties: {
        create: create && encodeBooleans(create),
        update: props.onUpdate && encodeBooleans(props.onUpdate),
        delete: props.onDelete && encodeBooleans(props.onDelete)
      }
    });
  }
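The `awsSdkToIamAction` helper referenced above turns an SDK service/action pair into the matching IAM action name. A simplified sketch of that mapping (the real helper may consult SDK metadata to resolve service prefixes that differ from the service name):

function awsSdkToIamAction(service: string, action: string): string {
  // e.g. awsSdkToIamAction('SNS', 'publish') === 'sns:Publish'
  return `${service.toLowerCase()}:${action.charAt(0).toUpperCase()}${action.slice(1)}`;
}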
  constructor(scope: Construct, id: string, props: HelmChartProps) {
    super(scope, id);

    const stack = Stack.of(this);

    // we maintain a single Helm chart custom resource handler for each cluster
    const handler = this.getOrCreateHelmChartHandler(props.cluster);
    if (!handler) {
      throw new Error('Cannot define a Helm chart on a cluster with kubectl disabled');
    }

    new CustomResource(this, 'Resource', {
      provider: CustomResourceProvider.lambda(handler),
      resourceType: HelmChart.RESOURCE_TYPE,
      properties: {
        Release: props.release || this.node.uniqueId.slice(-63).toLowerCase(), // Helm limits release names to 63 characters
        Chart: props.chart,
        Version: props.version,
        Values: (props.values ? stack.toJsonString(props.values) : undefined),
        Namespace: props.namespace || 'default',
        Repository: props.repository
      }
    });
  }
}
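Installing a chart then reduces to instantiating the construct; each property maps onto one of the custom resource properties above. A sketch, assuming `cluster` has kubectl enabled; the chart, repository, and values shown are hypothetical:

new HelmChart(this, 'NginxIngress', {
  cluster,
  chart: 'nginx-ingress',                       // hypothetical chart name
  repository: 'https://helm.nginx.com/stable',  // hypothetical repository URL
  namespace: 'kube-system',
  values: { controller: { replicaCount: 2 } },  // serialized via stack.toJsonString()
});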
  constructor(scope: cdk.App, id: string, props?: cdk.StackProps) {
    super(scope, id, props);

    // pick up to three AZs from the environment
    const azs = new cdk.AvailabilityZoneProvider(this).availabilityZones.slice(0, 3);

    // add an "AWS::CloudFormation::Stack" resource that uses the MongoDB quickstart
    // https://aws.amazon.com/quickstart/architecture/mongodb/
    // only non-default parameter values are provided here
    new CfnStack(this, 'NestedStack', {
      templateUrl: 'https://s3.amazonaws.com/quickstart-reference/mongodb/latest/templates/mongodb-master.template',
      parameters: {
        KeyPairName: 'my-key-pair',
        RemoteAccessCIDR: '0.0.0.0/0',
        AvailabilityZones: azs.join(','),
        NumberOfAZs: azs.length.toString(),
        MongoDBAdminPassword: 'root1234',
      }
    });
  }
}
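If the quickstart template declares outputs, the nested stack can hand them back through `Fn::GetAtt` on `Outputs.<Name>`. A sketch, assuming the `CfnStack` above is assigned to a variable `nested`; the output name is hypothetical and must match the quickstart template:

new cdk.CfnOutput(this, 'MongoPrimaryIp', {
  // 'Outputs.PrimaryReplicaNodeIp' is a hypothetical output of the quickstart template
  value: cdk.Token.asString(nested.getAtt('Outputs.PrimaryReplicaNodeIp')),
});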