// Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
// Networking and cluster settings supplied by the caller's configuration.
const privateSubnetIds = config.privateSubnetIds;
const securityGroupIds = config.securityGroupIds;
const clusterName = config.clusterName;

// Generate a strong random password for the Postgres DB.
const password = new random.RandomPassword(`${projectName}-password`, {
    special: true,
    length: 16,
    overrideSpecial: "_%@",
}).result;

// Subnet group placing the RDS instance in the private subnets.
const dbSubnets = new aws.rds.SubnetGroup(`${projectName}-subnets`, {
    subnetIds: privateSubnetIds,
});

// The Postgres RDS instance itself, attached to the caller-provided
// security groups.
const db = new aws.rds.Instance("postgresdb", {
    engine: "postgres",
    instanceClass: "db.t2.micro",
    allocatedStorage: 20,
    name: "testdb",
    username: "alice",
    password: password,
    dbSubnetGroupName: dbSubnets.id,
    vpcSecurityGroupIds: securityGroupIds,
    skipFinalSnapshot: true,
});

// Create a Secret from the DB connection information.
const provider = new k8s.Provider("provider", { kubeconfig: config.kubeconfig });
// NOTE(review): this Secret literal is truncated — the `data` object is never
// populated or closed, so this fragment does not parse. The remainder appears
// to have been lost when the snippets were concatenated; recover the original
// example before using this.
const dbConn = new k8s.core.v1.Secret("postgres-db-conn",
{
data: {
// Create a basic cluster and autoscaling group
const cluster = new awsx.ecs.Cluster("airflow", { vpc });
// NOTE(review): a floor of 20 t2.xlarge instances is very large for an
// example — confirm minSize is intentional before deploying.
const autoScalingGroup = cluster.createAutoScalingGroup("airflow", {
subnetIds: vpc.publicSubnetIds,
templateParameters: {
minSize: 20,
},
launchConfigurationArgs: {
instanceType: "t2.xlarge",
},
});
// Reuse the ECS cluster's security groups for the database.
const securityGroupIds = cluster.securityGroups.map(g => g.id);
// NOTE(review): the DB subnet group is built from *public* subnets; a
// database normally belongs in private subnets — verify this is deliberate.
const dbSubnets = new aws.rds.SubnetGroup("dbsubnets", {
subnetIds: vpc.publicSubnetIds,
});
const db = new aws.rds.Instance("postgresdb", {
engine: "postgres",
instanceClass: "db.t2.micro",
allocatedStorage: 20,
dbSubnetGroupName: dbSubnets.id,
vpcSecurityGroupIds: securityGroupIds,
name: "airflow",
username: "airflow",
// NOTE(review): the argument list is truncated here — the closing `});` is
// missing, and `dbPassword` is not defined anywhere above this fragment.
password: dbPassword,
const dbName = process.env.DB_NAME;
const dbUser = process.env.DB_USER;
const dbPassword = process.env.DB_PASS;
if (!dbName || !dbPassword || !dbUser) {
throw new Error("DB Credentials invalid");
}
const engineMode = getEngineMode();
const publiclyAccessible = getIsPubliclyAccessible();
if (publiclyAccessible) {
// TODO: we need to create security group with public access to DB
}
const postgresqlCluster = new aws.rds.Cluster("gauzy-db", {
availabilityZones: ["us-east-1a", "us-east-1b", "us-east-1c"],
backupRetentionPeriod: 30,
clusterIdentifier: "gauzy-db",
// TODO: set false in production
skipFinalSnapshot: true,
databaseName: dbName,
storageEncrypted: true,
engine: "aurora-postgresql",
engineVersion: "10.7",
masterPassword: dbPassword,
masterUsername: dbUser,
preferredBackupWindow: "07:00-09:00",
deletionProtection: false,
engineMode
skipFinalSnapshot: true,
databaseName: dbName,
storageEncrypted: true,
engine: "aurora-postgresql",
engineVersion: "10.7",
masterPassword: dbPassword,
masterUsername: dbUser,
preferredBackupWindow: "07:00-09:00",
deletionProtection: false,
engineMode
});
// for engineMode: "serverless" we don't need instances
if (engineMode == "provisioned") {
new aws.rds.ClusterInstance("gauzy-db-1", {
engine: "aurora-postgresql",
engineVersion: postgresqlCluster.engineVersion,
applyImmediately: true,
clusterIdentifier: postgresqlCluster.id,
identifier: "gauzy-db-1",
instanceClass: "db.t3.medium",
autoMinorVersionUpgrade: true,
availabilityZone: "us-east-1a",
performanceInsightsEnabled: true,
publiclyAccessible
});
}
return postgresqlCluster;
};
// The Pulumi project name namespaces the generated resource names.
const projectName = pulumi.getProject();

// Networking inputs supplied by the caller's configuration object.
const { privateSubnetIds, securityGroupIds, clusterName } = config;

// Strong random password for the Postgres database.
const password = new random.RandomPassword(`${projectName}-password`, {
    length: 16,
    special: true,
    overrideSpecial: "_%@",
}).result;

// Keep the database on the private subnets only.
const dbSubnets = new aws.rds.SubnetGroup(`${projectName}-subnets`, {
    subnetIds: privateSubnetIds,
});

// Provision the Postgres RDS instance.
const db = new aws.rds.Instance("postgresdb", {
    engine: "postgres",
    instanceClass: "db.t2.micro",
    allocatedStorage: 20,
    name: "testdb",
    username: "alice",
    password: password,
    dbSubnetGroupName: dbSubnets.id,
    vpcSecurityGroupIds: securityGroupIds,
    skipFinalSnapshot: true,
});

// Create a Secret from the DB connection information.
const provider = new k8s.Provider("provider", { kubeconfig: config.kubeconfig });
// NOTE(review): this async program fragment is truncated — the `ingress`
// rule, the SecurityGroup call, and the rest of the function body are cut
// off below; the orphaned closing braces appear much later in this file.
const pulumiProgram = async () => {
const vpc = awsx.ec2.Vpc.getDefault();
const subnetGroup = new aws.rds.SubnetGroup("dbsubnet", {
subnetIds: vpc.publicSubnetIds,
});
// make a public SG for our cluster for the migration
// NOTE(review): protocol "-1" with 0.0.0.0/0 opens all traffic in both
// directions — acceptable only for the short-lived migration described above.
const securityGroup = new awsx.ec2.SecurityGroup("publicGroup", {
egress: [
{
protocol: "-1",
fromPort: 0,
toPort: 0,
cidrBlocks: ["0.0.0.0/0"],
}
],
ingress: [
{
protocol: "-1",
import * as pulumi from "@pulumi/pulumi";

// Read the database and admin settings for this stack; require() throws if
// any key is missing from the configuration.
const config = new pulumi.Config();
const dbName = config.require("db_name");
const dbUsername = config.require("db_username");
const dbPassword = config.require("db_password");
const adminUsername = config.require("admin_username");
const adminPassword = config.require("admin_password");

// Default VPC and ECS cluster for the current account/region.
const vpc = awsx.ec2.Vpc.getDefault();
const cluster = awsx.ecs.Cluster.getDefault();

// Subnet group backing the database.
// NOTE(review): these are *public* subnets — fine for a demo, not for
// production.
const subnetGroup = new aws.rds.SubnetGroup("dbsubnets", {
    subnetIds: vpc.publicSubnetIds,
});

// Postgres instance wired to the default cluster's security groups.
const db = new aws.rds.Instance("db", {
    engine: "postgres",
    instanceClass: aws.rds.InstanceTypes.T3_Micro,
    allocatedStorage: 5,
    name: dbName,
    username: dbUsername,
    password: dbPassword,
    dbSubnetGroupName: subnetGroup.id,
    vpcSecurityGroupIds: cluster.securityGroups.map((group) => group.id),
    skipFinalSnapshot: true,
});
// NOTE(review): the lines below duplicate the config/VPC/database setup that
// already appears immediately above in this file — the snippets were
// concatenated, and these `const` redeclarations would be errors if the file
// were ever executed as-is.
const adminUsername = config.require("admin_username");
const adminPassword = config.require("admin_password");
// Get the default VPC and ECS Cluster for your account.
const vpc = awsx.ec2.Vpc.getDefault();
const cluster = awsx.ecs.Cluster.getDefault();
// Create a new subnet group for the database.
const subnetGroup = new aws.rds.SubnetGroup("dbsubnets", {
subnetIds: vpc.publicSubnetIds,
});
// Create a new database, using the subnet and cluster groups.
const db = new aws.rds.Instance("db", {
engine: "postgres",
instanceClass: aws.rds.InstanceTypes.T3_Micro,
allocatedStorage: 5,
dbSubnetGroupName: subnetGroup.id,
vpcSecurityGroupIds: cluster.securityGroups.map(g => g.id),
name: dbName,
username: dbUsername,
password: dbPassword,
skipFinalSnapshot: true,
});
// Assemble a connection string for the Miniflux service.
// NOTE(review): the password is embedded in the URL and sslmode=disable
// turns off TLS — avoid both in production.
const connectionString = pulumi.interpolate `postgres://${dbUsername}:${dbPassword}@${db.endpoint}/miniflux?sslmode=disable`;
// Create an NetworkListener to forward HTTP traffic on port 8080.
const listener = new awsx.lb.NetworkListener("lb", { port: 8080 });
// Create a Fargate service consisting of just one container instance (since that's all we
// NOTE(review): the comment above is cut off mid-sentence, and the object
// below is the orphaned tail of the `ingress` rule started much earlier in
// this file — another artifact of the snippet concatenation.
{
protocol: "-1",
fromPort: 0,
toPort: 0,
cidrBlocks: ["0.0.0.0/0"],
}
]
});
// Example-only credentials. NOTE(review): hard-coded secrets should never
// ship — source these from Pulumi config or a secret store before any real
// deployment.
const dbName = "hellosql";
const dbUser = "hellosql";
const dbPass = "hellosql";

// Aurora MySQL cluster backing the example database.
const cluster = new aws.rds.Cluster("db", {
    engine: aws.rds.EngineType.AuroraMysql,
    engineVersion: "5.7.mysql_aurora.2.03.2",
    databaseName: dbName,
    masterUsername: dbUser,
    masterPassword: dbPass,
    dbSubnetGroupName: subnetGroup.name,
    vpcSecurityGroupIds: [securityGroup.id],
    skipFinalSnapshot: true,
});
const clusterInstance = new aws.rds.ClusterInstance("dbInstance", {
clusterIdentifier: cluster.clusterIdentifier,
instanceClass: aws.rds.InstanceType.T3_Small,
engine: aws.rds.EngineType.AuroraMysql,
engineVersion: "5.7.mysql_aurora.2.03.2",
publiclyAccessible: true,
// NOTE(review): the ClusterInstance argument list is cut off here — the
// properties that follow (subnetIds, templateParameters,
// launchConfigurationArgs) belong to an autoscaling-group snippet that was
// pasted over the missing tail and are not valid ClusterInstance arguments.
subnetIds: vpc.publicSubnetIds,
templateParameters: {
minSize: 20,
},
launchConfigurationArgs: {
instanceType: "t2.xlarge",
},
});
// Collect the security-group IDs exposed by the ECS cluster so the database
// accepts traffic from its tasks.
const securityGroupIds = cluster.securityGroups.map((sg) => sg.id);

// Subnet group for the database.
// NOTE(review): built from *public* subnets — verify this is deliberate.
const dbSubnets = new aws.rds.SubnetGroup("dbsubnets", {
    subnetIds: vpc.publicSubnetIds,
});

// Small Postgres instance serving as Airflow's metadata store.
const db = new aws.rds.Instance("postgresdb", {
    engine: "postgres",
    instanceClass: "db.t2.micro",
    allocatedStorage: 20,
    name: "airflow",
    username: "airflow",
    password: dbPassword,
    dbSubnetGroupName: dbSubnets.id,
    vpcSecurityGroupIds: securityGroupIds,
    skipFinalSnapshot: true,
});
const cacheSubnets = new aws.elasticache.SubnetGroup("cachesubnets", {