// Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
// Lazily create (and cache) the single shared Azure Container Registry.
// Subsequent calls return the cached instance, so at most one registry is
// ever allocated per program run.
function getOrCreateGlobalRegistry(): azure.containerservice.Registry {
    if (globalRegistry) {
        return globalRegistry;
    }
    globalRegistry = new azure.containerservice.Registry("global", {
        resourceGroupName: shared.globalResourceGroupName,
        location: shared.location,
        // We need the admin account enabled so that we can grab the name/password to send to
        // docker. We could consider an approach whereby this was not enabled, but it was
        // conditionally enabled/disabled on demand when needed.
        adminEnabled: true,
        sku: "Standard",
    }, { parent: shared.getGlobalInfrastructureResource() });
    return globalRegistry;
}
// Create a Virtual Network for the cluster
// Virtual network that will host the AKS cluster's nodes.
// NOTE(review): `args` and `this` come from an enclosing component
// constructor that is not visible in this chunk — confirm against the
// original file.
const vnet = new azure.network.VirtualNetwork("keda", {
resourceGroupName: args.resourceGroupName,
addressSpaces: ["10.2.0.0/16"],
}, { parent: this });
// Create a Subnet for the cluster
// The subnet is carved out of the VNet's 10.2.0.0/16 address space above.
const subnet = new azure.network.Subnet("keda", {
resourceGroupName: args.resourceGroupName,
virtualNetworkName: vnet.name,
addressPrefix: "10.2.1.0/24",
}, { parent: this });
// Now allocate an AKS cluster.
// NOTE(review): this declaration is truncated by a bad file concatenation —
// the `servicePrincipal: {` on the final line below runs straight into an
// unrelated fragment, and the statement's closing braces are missing from
// this view.
this.cluster = new azure.containerservice.KubernetesCluster("aksCluster", {
resourceGroupName: args.resourceGroupName,
// Default node pool, sized from the component args.
defaultNodePool: {
name: "aksagentpool",
nodeCount: args.vmCount,
vmSize: args.vmSize,
osDiskSizeGb: 30,
// Place the nodes in the subnet created above.
vnetSubnetId: subnet.id,
},
dnsPrefix: name,
linuxProfile: {
adminUsername: "aksuser",
sshKey: {
keyData: sshPublicKey,
},
},
servicePrincipal: {const k8sClusters = aksClusterConfig.map((perClusterConfig, index) => {
// NOTE(review): fragment of a multi-cluster example — one AKS cluster per
// entry in `aksClusterConfig`; the map callback opens on the corrupted line
// above, and the fragment is truncated again at the final line below.
const cluster = new azure.containerservice.KubernetesCluster(`aksCluster-${perClusterConfig.name}`, {
// Global config arguments
resourceGroupName: config.resourceGroup.name,
linuxProfile: {
adminUsername: "aksuser",
sshKey: {
keyData: config.sshPublicKey,
},
},
// AD service principal the cluster uses to manage Azure resources.
servicePrincipal: {
clientId: adApp.applicationId,
clientSecret: adSpPassword.value,
},
// Per-cluster config arguments
location: perClusterConfig.location,
agentPoolProfiles: [{
name: "aksagentpool",import * as pulumi from "@pulumi/pulumi";
// Create an Azure Resource Group
const resourceGroup = new azure.core.ResourceGroup("jenkins-tutorial-group");
// Linux App Service plan (Basic B1) to host the containerized app.
// `reserved: true` is what marks the plan as a Linux plan.
const appServicePlan = new azure.appservice.Plan("appservice-plan", {
kind: "Linux",
resourceGroupName: resourceGroup.name,
reserved: true,
sku: {
tier: "Basic",
size: "B1",
},
});
// Container registry for the app image. The admin account is enabled so the
// docker provider below can authenticate with username/password; consider a
// service principal or scoped token for production use.
const registry = new azure.containerservice.Registry("myacr", {
resourceGroupName: resourceGroup.name,
sku: "Basic",
adminEnabled: true,
});
// Name/tag of the Docker image built from the parent directory's Dockerfile.
const customImage = "spring-boot-greeting-app";
// Build the image and push it to the registry above, tagged v1.0.0,
// authenticating with the registry's admin credentials.
// NOTE(review): this declaration is truncated by the concatenation — an
// unrelated license header is fused onto its final visible line.
const myImage = new docker.Image(customImage, {
imageName: pulumi.interpolate`${registry.loginServer}/${customImage}:v1.0.0`,
build: {
context: `../`,
},
registry: {
server: registry.loginServer,
username: registry.adminUsername,
password: registry.adminPassword,
},// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import * as azure from "@pulumi/azure";
import * as pulumi from "@pulumi/pulumi";
import * as config from "./config";
// Now allocate an AKS cluster.
// NOTE(review): this fragment uses an older @pulumi/azure API shape —
// singular `agentPoolProfile` with `count`, and `sshKeys` as an array —
// whereas other fragments in this file use `defaultNodePool`/`nodeCount`
// and a single `sshKey`. Verify against the provider version actually
// installed before reusing. The declaration is also truncated: an import
// line is fused onto its final visible line.
const k8sCluster = new azure.containerservice.KubernetesCluster("aksCluster", {
resourceGroupName: config.resourceGroupName,
location: config.location,
agentPoolProfile: {
name: "aksagentpool",
count: config.nodeCount,
vmSize: config.nodeSize,
},
// Include the stack name so each stack gets a distinct DNS prefix.
dnsPrefix: `${pulumi.getStack()}-kube`,
linuxProfile: {
adminUsername: "aksuser",
sshKeys: [
{
keyData: config.sshPublicKey,
},
],
},import * as azuread from "@pulumi/azuread";
import * as k8s from "@pulumi/kubernetes";
import * as pulumi from "@pulumi/pulumi";
import * as config from "./config";
// Create the AD service principal for the K8s cluster.
const adApp = new azuread.Application("aks");
const adSp = new azuread.ServicePrincipal("aksSp", { applicationId: adApp.applicationId });
// Password credential for the service principal. The secret value comes
// from stack config; the far-future endDate makes it effectively
// non-expiring, so plan for deliberate rotation in production.
const adSpPassword = new azuread.ServicePrincipalPassword("aksSpPassword", {
servicePrincipalId: adSp.id,
value: config.password,
endDate: "2099-01-01T00:00:00Z",
});
// Now allocate an AKS cluster.
// NOTE(review): truncated by the file concatenation — the final visible
// line fuses into an unrelated component constructor, so the closing of
// `servicePrincipal` and the statement itself are missing from this view.
export const k8sCluster = new azure.containerservice.KubernetesCluster("aksCluster", {
resourceGroupName: config.resourceGroup.name,
location: config.location,
defaultNodePool: {
name: "aksagentpool",
nodeCount: config.nodeCount,
vmSize: config.nodeSize,
},
// Include the stack name so each stack gets a distinct DNS prefix.
dnsPrefix: `${pulumi.getStack()}-kube`,
linuxProfile: {
adminUsername: "aksuser",
sshKey: {
keyData: config.sshPublicKey,
},
},
servicePrincipal: {
clientId: adApp.applicationId,constructor(name: string,
// NOTE(review): remainder of a component constructor whose opening is fused
// into the corrupted line above; `name`, `args`, and `opts` are its
// parameters, and the constructor's closing brace is beyond this view.
args: KedaServiceArgs,
opts: pulumi.ComponentResourceOptions = {}) {
super("examples:keda:KedaEdge", name, args, opts);
// Add a container registry to store custom images of Azure Functions
const registry = new azure.containerservice.Registry("registry", {
resourceGroupName: args.resourceGroup.name,
adminEnabled: true,
sku: "Premium",
}, { parent: this });
// Assemble a dockercfg-style auth map keyed by the registry's login
// server, built once all three registry outputs have resolved.
const dockercfg = pulumi.all([registry.loginServer, registry.adminUsername, registry.adminPassword])
.apply(([server, username, password]) => {
const r: any = {};
r[server] = {
// The email field is required by the dockercfg format but otherwise unused.
email: "notneeded@notneeded.com",
username,
password,
};
return r;
});import * as pulumi from "@pulumi/pulumi";
import * as tls from "@pulumi/tls";
import { config } from "./config";
const name = pulumi.getProject();
// SSH key pair used by the Kubernetes cluster's Linux nodes.
// Note: We create one here to simplify the demo, but a production
// deployment would probably pass an existing key in as a variable.
const generatedKey = new tls.PrivateKey(`${name}-sshKey`, {
    algorithm: "RSA",
    rsaBits: 4096,
});
const sshPublicKey = generatedKey.publicKeyOpenssh;
// Create the AKS cluster.
// NOTE(review): truncated — the `}],` on the final line fuses into an
// unrelated container-group fragment; the rest of the cluster's arguments
// and the closing of the statement are missing from this view.
const cluster = new azure.containerservice.KubernetesCluster(`${name}`, {
resourceGroupName: config.resourceGroupName,
// Two node pools: a larger-VM "performant" pool and a small burstable
// "standard" pool, both placed in the configured subnet.
agentPoolProfiles: [{
name: "performant",
count: 3,
vmSize: "Standard_DS4_v2",
osType: "Linux",
osDiskSizeGb: 30,
vnetSubnetId: config.subnetId,
}, {
name: "standard",
count: 2,
vmSize: "Standard_B2s",
osType: "Linux",
osDiskSizeGb: 30,
vnetSubnetId: config.subnetId,
}],protocol: protocol,
// NOTE(review): tail of a helper that builds an Azure Container Instance
// group; the enclosing function and per-container loop open before the
// corrupted line above, so variables like `credentials`, `exposedPorts`,
// and `azureContainers` are declared outside this view.
image: imageOptions.image,
environmentVariables: imageOptions.environment,
commands: container.command,
});
// Fall back to the registry's admin credentials when the caller did not
// supply explicit image-registry credentials.
credentials = credentials || (registry
? [{ password: registry.adminPassword, server: registry.loginServer, username: registry.adminUsername }]
: undefined);
if (targetPortNumber !== undefined) {
exposedPorts[containerName] = { [targetPortNumber]: hostPortNumber! };
}
}
// Container group hosting all the assembled containers on a public IP;
// characters Azure disallows in the name are replaced with '-'.
const group = new azure.containerservice.Group(
name.replace(disallowedChars, "-"), {
containers: azureContainers,
location: shared.location,
resourceGroupName: shared.globalResourceGroupName,
osType: getOS(props),
imageRegistryCredentials: credentials,
ipAddressType: "Public",
}, { parent: parent });
const endpoints = getEndpoints(exposedPorts, group);
// A default endpoint only exists when a first container/port pair was
// recorded; otherwise surface an undefined output.
const defaultEndpoint = firstContainerName === undefined || firstContainerPort === undefined
? pulumi.output(undefined!)
: endpoints.apply(
ep => getEndpointHelper(ep));
return { group, endpoints, defaultEndpoint };siteConfig: {
// NOTE(review): tail of an App Service declaration whose `siteConfig:`
// opens on the corrupted line above — an always-on Linux container pulled
// from Docker Hub, with HTTPS enforced.
alwaysOn: true,
linuxFxVersion: `DOCKER|${imageInDockerHub}`,
},
httpsOnly: true,
});
// Public URL of the sample app's /hello route.
export const helloEndpoint = pulumi.interpolate`https://${helloApp.defaultSiteHostname}/hello`;
/**
 * Scenario 2: deploying a custom image from Azure Container Registry.
 */
const customImage = "node-app";

// Registry that will host the custom image. The admin account is enabled so
// the docker provider can authenticate with username/password.
const registry = new azure.containerservice.Registry("myregistry", {
    resourceGroupName: resourceGroup.name,
    sku: "Basic",
    adminEnabled: true,
});

// Build the Dockerfile under ./node-app and push the result to the registry
// above, tagged v1.0.0.
const myImage = new docker.Image(customImage, {
    imageName: pulumi.interpolate`${registry.loginServer}/${customImage}:v1.0.0`,
    registry: {
        server: registry.loginServer,
        username: registry.adminUsername,
        password: registry.adminPassword,
    },
    build: {
        context: `./${customImage}`,
    },
});