// Copyright 2016-2019, Pulumi Corporation. All rights reserved.
import * as k8s from "@pulumi/kubernetes";
import * as pulumi from "@pulumi/pulumi";
// Minikube does not implement services of type `LoadBalancer`; require the user to specify if we're
// running on minikube, and if so, create only services of type ClusterIP.
const config = new pulumi.Config();
const isMinikube = config.require("isMinikube");
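// Set before deploying, e.g.: `pulumi config set isMinikube true`.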
// nginx container, replicated 1 time.
const appName = "nginx";
const appLabels = { app: appName };
const nginx = new k8s.apps.v1.Deployment(appName, {
spec: {
selector: { matchLabels: appLabels },
replicas: 1,
template: {
metadata: { labels: appLabels },
spec: { containers: [{ name: appName, image: "nginx:1.15-alpine" }] },
},
},
});
// Allocate an IP to the nginx Deployment.
const frontend = new k8s.core.v1.Service(appName, {
metadata: { labels: nginx.spec.template.metadata.labels },
spec: {
type: isMinikube === "true" ? "ClusterIP" : "LoadBalancer",
        ports: [{ port: 80, targetPort: 80, protocol: "TCP" }],
    },
});
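// Export the app's address (a sketch following the upstream example's
// pattern): on minikube the ClusterIP, otherwise the load balancer ingress.
export const ip = isMinikube === "true"
    ? frontend.spec.clusterIP
    : frontend.status.loadBalancer.apply(lb => lb.ingress[0].ip || lb.ingress[0].hostname);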
const pulumiConfig = new pulumi.Config();
// Existing Pulumi stack reference in the format:
// <organization>/<project>/<stack> e.g. "myUser/myProject/dev"
const clusterStackRef = new pulumi.StackReference(pulumiConfig.require("clusterStackRef"));
// Get the kubeconfig from the cluster stack output; it may be a structured
// object, so serialize it to the string the provider expects.
const kubeconfig = clusterStackRef.getOutput("kubeconfig").apply(JSON.stringify);
// Create the k8s provider with the kubeconfig.
const provider = new k8s.Provider("k8sProvider", {kubeconfig});
const ns = new k8s.core.v1.Namespace("app-ns", {
metadata: { name: "joe-duffy" },
}, {provider});
const appLabels = { app: "iac-workshop" };
const deployment = new k8s.apps.v1.Deployment("app-dep", {
metadata: { namespace: ns.metadata.name },
spec: {
selector: { matchLabels: appLabels },
replicas: 1,
template: {
metadata: { labels: appLabels },
spec: {
containers: [{
name: "iac-workshop",
image: "gcr.io/google-samples/kubernetes-bootcamp:v1",
}],
},
},
},
}, {provider});
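// Reach the Deployment from inside the cluster (a sketch; the workshop adds
// a Service like this in a later step). kubernetes-bootcamp listens on 8080.
const service = new k8s.core.v1.Service("app-svc", {
    metadata: { namespace: ns.metadata.name },
    spec: {
        selector: appLabels,
        ports: [{ port: 80, targetPort: 8080 }],
    },
}, { provider });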
files: ["default.conf"],
policy: s3Helpers.publicReadPolicy,
});
const bucketId = nginxConfigBucket.fileIdFromHashedContents("default.conf");
// The URL at which the nginx config is stored.
export const nginxConfigUrl = nginxConfigBucket.getUrlForFile("default.conf");
// Shared volume: the init container downloads the nginx config from S3 into
// it, and the nginx container mounts it to configure itself.
const nginxConfigVol = { name: bucketId, emptyDir: {} };
const nginxConfigMount = { name: nginxConfigVol.name, mountPath: "/etc/nginx/conf.d" };
// Deploy 1 replica of nginx. Use `curl` to get `default.conf` from a public S3 bucket, which
// configures nginx to act as a proxy for `pulumi.github.io`.
const nginx = new k8s.apps.v1.Deployment("nginx", {
    spec: {
        // apps/v1 Deployments require an explicit Pod selector.
        selector: { matchLabels: { app: "nginx" } },
        replicas: 1,
template: {
metadata: { labels: { app: "nginx" } },
spec: {
// `curl` the nginx configuration out of `nginxConfigUrl`, the public S3 bucket.
// Place in a volume shared with the `nginx` container.
initContainers: [util.curl(nginxConfigUrl, "default.conf", nginxConfigMount)],
// nginx container, picks up the configuration file automatically.
containers: [
{
image: "nginx:1.13.6-alpine",
name: "nginx",
volumeMounts: [nginxConfigMount],
},
                ],
                // Make the shared config volume available to both containers.
                volumes: [nginxConfigVol],
            },
        },
    },
});
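// A minimal sketch of the local `util.curl` helper assumed above: it builds
// an init container that downloads a file into the shared volume mount.
function curlInitContainer(
    fromUrl: pulumi.Input<string>,
    fileName: string,
    mount: { name: string; mountPath: string },
) {
    return {
        name: "curl",
        image: "byrnedo/alpine-curl",
        command: ["sh", "-c", pulumi.interpolate`curl -o ${mount.mountPath}/${fileName} ${fromUrl}`],
        volumeMounts: [mount],
    };
}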
// A ComponentResource wrapping the demo app (class name assumed; this
// snippet is the class's constructor).
export class DemoApp extends pulumi.ComponentResource {
    constructor(name: string,
args: DemoAppArgs,
opts: pulumi.ComponentResourceOptions = {}) {
super("examples:kubernetes-ts-multicloud:demo-app", name, args, opts);
// Create the kuard Deployment.
const appLabels = {app: "kuard"};
const deployment = new k8s.apps.v1.Deployment(`${name}-demo-app`, {
spec: {
selector: {matchLabels: appLabels},
replicas: 1,
template: {
metadata: {labels: appLabels},
spec: {
containers: [
{
name: "kuard",
image: `gcr.io/kuar-demo/kuard-amd64:${args.imageTag}`,
ports: [{containerPort: 8080, name: "http"}],
livenessProbe: {
httpGet: {path: "/healthy", port: "http"},
initialDelaySeconds: 5,
timeoutSeconds: 1,
                                periodSeconds: 10,
                            },
                        },
                    ],
                },
            },
        },
        // Resource options assumed: parented to this component, with the
        // cluster provider carried in `args`.
        }, {parent: this, provider: args.provider});
    }
}
// Create a Kubernetes provider for the target cluster (head of this snippet
// assumed; `config` here is the example's local config module).
const provider = new k8s.Provider("provider", {
    kubeconfig: config.kubeconfig,
namespace: config.appsNamespaceName,
});
// Create a Secret with the database credentials.
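// `stringData` accepts plaintext values; the API server base64-encodes them
// into `data` when storing the Secret.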
const databaseSecret = new k8s.core.v1.Secret("db-secret", {
stringData: {
"database-username": config.databaseUsername,
"database-password": config.databasePassword,
}
}, { provider: provider });
// Create a Deployment that uses the database credentials as environment variables.
const appName = "nginx";
const appLabels = { app: appName };
const nginx = new k8s.apps.v1.Deployment(appName, {
metadata: { labels: appLabels },
spec: {
selector: {
matchLabels: appLabels,
},
replicas: 1,
template: {
metadata: { labels: appLabels },
spec: {
containers: [
{
image: "nginx",
name: "nginx",
env: [
                            {
                                name: "DATABASE_USERNAME",
                                valueFrom: {
                                    secretKeyRef: {
                                        name: databaseSecret.metadata.name,
                                        key: "database-username",
                                    },
                                },
                            },
                            {
                                name: "DATABASE_PASSWORD",
                                valueFrom: {
                                    secretKeyRef: {
                                        name: databaseSecret.metadata.name,
                                        key: "database-password",
                                    },
                                },
                            },
                        ],
                    },
                ],
            },
        },
    },
}, { provider: provider });
// Base name for the resources below (value assumed for this snippet).
const name = "helloworld";
// Create a Kubernetes provider instance that uses our cluster from above.
const clusterProvider = new k8s.Provider(name, {
    kubeconfig: kubeconfig,
});
// Create a Kubernetes Namespace
const ns = new k8s.core.v1.Namespace(name, {}, { provider: clusterProvider });
// Export the Namespace name
export const namespaceName = ns.metadata.name;
// Create a NGINX Deployment
const appLabels = { appClass: name };
const deployment = new k8s.apps.v1.Deployment(name,
{
metadata: {
namespace: namespaceName,
labels: appLabels,
},
spec: {
replicas: 1,
selector: { matchLabels: appLabels },
template: {
metadata: {
labels: appLabels,
},
spec: {
containers: [
{
                            name: name,
                            image: "nginx",
                            ports: [{ name: "http", containerPort: 80 }],
                        },
                    ],
                },
            },
        },
    },
    { provider: clusterProvider },
);
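// Expose the Deployment publicly (a sketch continuing the same tutorial
// pattern, reusing the names defined above).
const service = new k8s.core.v1.Service(name,
    {
        metadata: { namespace: namespaceName, labels: appLabels },
        spec: {
            type: "LoadBalancer",
            ports: [{ port: 80, targetPort: "http" }],
            selector: appLabels,
        },
    },
    { provider: clusterProvider },
);
// Export the public ingress address once the load balancer is provisioned.
export const serviceIp = service.status.loadBalancer.apply(lb => lb.ingress[0].ip);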
// Deploy Prometheus with its Helm chart (head of this snippet assumed;
// `prometheus` is a k8s.helm.v2.Chart named "p8s"), then look up the
// Service and Deployment it created.
const prometheus = new k8s.helm.v2.Chart("p8s", {
    repo: "stable",
    chart: "prometheus",
});
const p8sService = prometheus.getResource("v1/Service", "p8s-prometheus-server");
const p8sDeployment = prometheus.getResource(
"extensions/v1beta1/Deployment",
"p8s-prometheus-server",
);
// IMPORTANT: This forwards the Prometheus service to localhost, so we can check it. If you are
// running in-cluster, you probably don't need this!
const localPort = 9090;
const forwarderHandle = util.forwardPrometheusService(p8sService, p8sDeployment, {
localPort,
});
// Canary ring. Deploy a single replica of the instrumented Pod.
// (`instrumentedPod` and `containerName` are defined earlier in the
// original example.)
const canary = new k8s.apps.v1beta1.Deployment(
"canary-example-app",
{ spec: { replicas: 1, template: instrumentedPod } },
{ dependsOn: p8sDeployment },
);
// Staging ring. Replicate instrumented Pod 10 times.
const staging = new k8s.apps.v1beta1.Deployment("staging-example-app", {
metadata: {
annotations: {
// Check P90 latency is < 100,000 microseconds. Returns a `Promise` with the P90
// response time. It must resolve correctly before this deployment rolls out. In
// general any `Promise` could go here.
"example.com/p90ResponseTime": util.checkHttpLatency(canary, containerName, {
durationSeconds: 60,
quantile: 0.9,
                thresholdMicroseconds: 100000,
            }),
        },
    },
    spec: { replicas: 10, template: instrumentedPod },
});
import * as k8s from "@pulumi/kubernetes";
import * as pulumi from "@pulumi/pulumi";
const ns = new k8s.core.v1.Namespace("app-ns", {
metadata: { name: "joe-duffy" },
});
const appLabels = { app: "iac-workshop" };
const deployment = new k8s.apps.v1.Deployment("app-dep", {
metadata: { namespace: ns.metadata.name },
spec: {
selector: { matchLabels: appLabels },
replicas: 3,
template: {
metadata: { labels: appLabels },
spec: {
containers: [{
name: "iac-workshop",
image: "jocatalin/kubernetes-bootcamp:v2",
}],
},
},
},
});
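// Changing the image (v1 -> v2) and bumping `replicas` is all it takes: the
// next `pulumi up` triggers a rolling update of the Deployment's Pods.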
// Container for the app (head of this snippet assumed; like `frontendPort`
// below, the image comes from config in the original example).
const container = {
    name: name,
    image: config.frontendImage, // hypothetical config key
    /*
    // Liveness probe, disabled in the original example (probe target assumed):
    livenessProbe: {
        httpGet: { path: '/', port: 'http' },
        initialDelaySeconds: 90,
        timeoutSeconds: 120,
        periodSeconds: 10
    },
    */
ports: [
{
name: 'http',
containerPort: config.frontendPort,
protocol: 'TCP'
}
]
};
const deployment = new k8s.apps.v1.Deployment(
name,
{
metadata: {
namespace: namespaceName,
labels: appLabels
},
spec: {
replicas: 1,
selector: { matchLabels: appLabels },
template: {
metadata: {
labels: appLabels
},
spec: {
containers: [container]
                        }
                    }
                }
            }
        );
export function createDeployment(
name: string,
args: EchoserverDeploymentArgs,
): k8s.apps.v1.Deployment {
return new k8s.apps.v1.Deployment(name,
{
metadata: {
labels: args.labels,
namespace: args.namespace,
},
spec: {
replicas: args.replicas,
selector: { matchLabels: args.labels },
template: {
metadata: { labels: args.labels, namespace: args.namespace },
spec: {
restartPolicy: "Always",
containers: [
{
name: name,
image: "gcr.io/google-containers/echoserver:1.5",