# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# VPC (10.11.0.0/16) with DNS support and hostnames enabled, so instances
# launched inside it receive resolvable DNS names.
vpc = ec2.Vpc('test',
cidr_block="10.11.0.0/16",
enable_dns_hostnames=True,
enable_dns_support=True)
# Internet gateway attached to the VPC to allow traffic to/from the internet.
internet_gateway = ec2.InternetGateway('test',
vpc_id=vpc.id)
# Route table with a default route (0.0.0.0/0) through the internet gateway;
# subnets associated with this table become public subnets.
route_table = ec2.RouteTable('test',
vpc_id=vpc.id,
routes=[ec2.RouteTableRouteArgs(
cidr_block="0.0.0.0/0",
gateway_id=internet_gateway.id
)])
# Wrap the availability-zone lookup in an Output so the AZ names can be
# consumed lazily by the subnet declarations below.
zones = Output.from_input(get_availability_zones())
zone_names = zones.apply(
lambda zs: zs.names)
# Two /24 public subnets, one in each of the first two availability zones.
# map_public_ip_on_launch=True gives instances in these subnets public IPs.
subnet0 = ec2.Subnet("test0",
vpc_id=vpc.id,
availability_zone=zone_names.apply(lambda names: names[0]),
cidr_block="10.11.0.0/24",
map_public_ip_on_launch=True)
subnet1 = ec2.Subnet("test1",
vpc_id=vpc.id,
availability_zone=zone_names.apply(lambda names: names[1]),
cidr_block="10.11.1.0/24",
map_public_ip_on_launch=True)
route_table_association0 = ec2.RouteTableAssociation('test0',
raise TypeError('Expected config to be a ChartOpts or LocalChartOpts instance')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if config.resource_prefix:
release_name = f"{config.resource_prefix}-{release_name}"
super(Chart, self).__init__(
"kubernetes:helm.sh/v2:Chart",
release_name,
__props__,
opts)
all_config = pulumi.Output.from_input((release_name, config, pulumi.ResourceOptions(parent=self)))
# Note: Unlike NodeJS, Python requires that we "pull" on our futures in order to get them scheduled for
# execution. In order to do this, we leverage the engine's RegisterResourceOutputs to wait for the
# resolution of all resources that this Helm chart created.
self.resources = all_config.apply(_parse_chart)
self.register_outputs({"resources": self.resources})
},
service_principal={
"client_id": ad_app.application_id,
"client_secret": ad_sp_password.value
},
location=config["location"],
default_node_pool={
"name": "aksagentpool",
"node_count": config["node_count"],
"vm_size": config["node_size"],
},
dns_prefix="sample-kube",
)
cluster_names.append(cluster.name)
export("aks_cluster_names", Output.all(cluster_names))
# Build the Flask frontend container image from ./frontend and push it to the
# application's ECR repository (skip_push=False forces a push after the build).
flask_image = docker.Image(
    "flask-dockerimage",
    registry=app_registry,
    image_name=app_ecr_repo.repository_url,
    build="./frontend",
    skip_push=False,
)
# Creating a task definition for the Flask instance.
flask_task_definition = aws.ecs.TaskDefinition("flask-task-definition",
family="frontend-task-definition-family",
cpu="256",
memory="512",
network_mode="awsvpc",
requires_compatibilities=["FARGATE"],
execution_role_arn=app_exec_role.arn,
task_role_arn=app_task_role.arn,
container_definitions=pulumi.Output.all(flask_image.image_name,
redis_endpoint).apply(
lambda args: json.dumps([{
"name": "flask-container",
"image": args[0],
"memory": 512,
"essential": True,
"portMappings": [{
"containerPort": 80,
"hostPort": 80,
"protocol": "tcp"
}],
"environment": [
# The Redis endpoint we created is given to Flask, allowing it to communicate with the former
{"name": "REDIS", "value": args[1]["host"]},
{"name": "REDIS_PORT", "value": str(args[1]["port"])},
{"name": "REDIS_PWD", "value": redis_password},
class ComponentStatus(pulumi.CustomResource):
api_version: pulumi.Output[str]
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
conditions: pulumi.Output[list]
"""
List of component conditions observed
"""
kind: pulumi.Output[str]
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
metadata: pulumi.Output[dict]
"""
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
def __init__(__self__, resource_name, opts=None, api_version=None, conditions=None, kind=None, metadata=None, __props__=None, __name__=None, __opts__=None):
"""
ComponentStatus (and ComponentStatusList) holds the cluster validation info.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[list] conditions: List of component conditions observed
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input[dict] metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
objs += _parse_yaml_object(item, opts, transformations, resource_prefix)
return objs
if "metadata" not in obj or "name" not in obj["metadata"]:
raise Exception("YAML object does not have a .metadata.name: {}/{} {}".format(
api_version, kind, json.dumps(obj)))
# Convert obj keys to Python casing
for key in list(obj.keys()):
new_key = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(key) or key
if new_key != key:
obj[new_key] = obj.pop(key)
metadata = obj["metadata"]
spec = obj.get("spec")
identifier: pulumi.Output = pulumi.Output.from_input(metadata["name"])
if "namespace" in metadata:
identifier = pulumi.Output.from_input(metadata).apply(
lambda metadata: f"{metadata['namespace']}/{metadata['name']}")
if resource_prefix:
identifier = pulumi.Output.from_input(identifier).apply(
lambda identifier: f"{resource_prefix}-{identifier}")
gvk = f"{api_version}/{kind}"
if gvk == "admissionregistration.k8s.io/v1/MutatingWebhookConfiguration":
# Import locally to avoid name collisions.
from pulumi_kubernetes.admissionregistration.v1 import MutatingWebhookConfiguration
return [identifier.apply(
lambda x: (f"admissionregistration.k8s.io/v1/MutatingWebhookConfiguration:{x}",
MutatingWebhookConfiguration(f"{x}", opts, **obj)))]
if gvk == "admissionregistration.k8s.io/v1/MutatingWebhookConfigurationList":
# Import locally to avoid name collisions.
# *** WARNING: this file was generated by the Pulumi Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
from typing import Any, Callable, Optional, Sequence, Tuple, Union
import pulumi.runtime
from pulumi_kubernetes.yaml import _parse_yaml_document
from ... import _utilities
class Chart(pulumi.ComponentResource):
resources: pulumi.Output[dict]
"""
Kubernetes resources contained in this Chart.
"""
def __init__(self,
release_name: str,
config: Union['ChartOpts', 'LocalChartOpts'],
opts: Optional[pulumi.ResourceOptions] = None):
"""
Chart is a component representing a collection of resources described by an arbitrary Helm
Chart. The Chart can be fetched from any source that is accessible to the `helm` command
line. Values in the `values.yml` file can be overridden using `ChartOpts.values` (equivalent
to `--set` or having multiple `values.yml` files). Objects can be transformed arbitrarily by
supplying callbacks to `ChartOpts.transformations`.
Chart does not use Tiller. The Chart specified is copied and expanded locally; the semantics
# Azure SQL logical server that hosts the sample database.
# NOTE(review): administrator_login_password is defined elsewhere in this
# program (presumably from config/secret) — confirm it is not hard-coded.
sql_server = sql.SqlServer(
    "sqlserver",
    resource_group_name=resource_group.name,
    administrator_login_password=administrator_login_password,
    administrator_login="manualadmin",
    version="12.0")

# S0-tier database on the server above.
database = sql.Database(
    "sqldb",
    resource_group_name=resource_group.name,
    server_name=sql_server.name,
    requested_service_objective_name="S0")

# ADO.NET-style connection string assembled from the server and database names.
# The previous `... or "1111"` suffix was dead code: `Output.all(...).apply(...)`
# always returns an Output object, which is always truthy, so the right-hand
# side of `or` could never be selected — it has been removed.
connection_string = Output.all(sql_server.name, database.name) \
    .apply(lambda args: f"Server=tcp:{args[0]}.database.windows.net;Database={args[1]};")

# Upload the local README as a block blob into the storage container.
text_blob = storage.Blob(
    "text",
    storage_account_name=storage_account.name,
    storage_container_name=container.name,
    type="block",
    source="./README.md"
)
app_service_plan = appservice.Plan(
"asp",
resource_group_name=resource_group.name,
kind="App",
sku={
"tier": "Basic",
for file in _files:
cf = ConfigFile(
file, file_id=file, transformations=transformations, resource_prefix=resource_prefix, opts=opts)
# Add any new ConfigFile resources to the ConfigGroup's resources
self.resources = pulumi.Output.all(cf.resources, self.resources).apply(lambda x: {**x[0], **x[1]})
for text in yaml:
# Rather than using the default provider for the following invoke call, use the version specified
# in package.json.
invoke_opts = pulumi.InvokeOptions(version=_utilities.get_version())
__ret__ = pulumi.runtime.invoke('kubernetes:yaml:decode', {'text': text}, invoke_opts).value['result']
resources = _parse_yaml_document(__ret__, opts, transformations, resource_prefix)
# Add any new YAML resources to the ConfigGroup's resources
self.resources = pulumi.Output.all(resources, self.resources).apply(lambda x: {**x[0], **x[1]})
# Note: Unlike NodeJS, Python requires that we "pull" on our futures in order to get them scheduled for
# execution. In order to do this, we leverage the engine's RegisterResourceOutputs to wait for the
# resolution of all resources that this YAML document created.
self.register_outputs({"resources": self.resources})
website={
"index_document": "index.html"
})
# Upload every file in the local site directory to the bucket as a
# publicly readable object so the static website can serve it.
for file in os.listdir(site_dir):
    filepath = os.path.join(site_dir, file)
    # Guess the MIME type from the filename so browsers render the object
    # correctly; guess_type may return None for unknown extensions.
    mime_type, _ = mimetypes.guess_type(filepath)
    obj = aws.s3.BucketObject(
        file,
        bucket=bucket.name,
        source=pulumi.FileAsset(filepath),
        # Fix: the canned ACL is spelled "public-read"; the previous
        # "public_read" (underscore) is not a valid S3 canned ACL and is
        # rejected by AWS at deployment time.
        acl="public-read",
        content_type=mime_type,
    )

# Export the bucket name and the HTTP endpoint of the static website.
pulumi.export('bucket_name', bucket.bucket)
pulumi.export('bucket_endpoint', pulumi.Output.concat("http://", bucket.website_endpoint))