# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def complete_queue_name(**kwargs):
    """Argcomplete helper: return the names of all Batch job queues."""
    pages = paginate(clients.batch.get_paginator("describe_job_queues"))
    return [queue["jobQueueName"] for queue in pages]
def complete_ce_name(**kwargs):
    """Argcomplete helper: return the names of all Batch compute environments."""
    pages = paginate(clients.batch.get_paginator("describe_compute_environments"))
    return [env["computeEnvironmentName"] for env in pages]
def batch(args):
    """Entry point for the "batch" command group: just prints subcommand help."""
    batch_parser.print_help()

# Top-level parser for all "batch" subcommands; the module docstring becomes its description.
batch_parser = register_parser(batch, help="Manage AWS Batch resources", description=__doc__)
def queues(args):
    """List all Batch job queues in the current account/region."""
    paginator = clients.batch.get_paginator("describe_job_queues")
    page_output(tabulate(paginate(paginator), args))

parser = register_listing_parser(queues, parent=batch_parser, help="List Batch queues")
def create_queue(args):
    """Create a Batch job queue over the given compute environments and wait until it is VALID."""
    # Preserve the user-supplied ordering of compute environments.
    env_order = [dict(computeEnvironment=name, order=rank)
                 for rank, name in enumerate(args.compute_environments)]
    logger.info("Creating queue %s in %s", args.name, env_order)
    queue = clients.batch.create_job_queue(jobQueueName=args.name,
                                           priority=args.priority,
                                           computeEnvironmentOrder=env_order)
    waiter = make_waiter(clients.batch.describe_job_queues, "jobQueues[].status", "VALID", "pathAny")
    waiter.wait(jobQueues=[args.name])
    return queue

parser = register_parser(create_queue, parent=batch_parser, help="Create a Batch queue")
parser.add_argument("name")
parser.add_argument("--priority", type=int, default=5)
parser.add_argument("--compute-environments", nargs="+", required=True)
def delete_queue(args):
    """Disable a Batch job queue and wait for it to settle.

    NOTE(review): as visible here this only DISABLEs the queue and waits for
    VALID status; the actual delete_job_queue call appears to lie outside this
    chunk — confirm against the full file.
    """
    clients.batch.update_job_queue(jobQueue=args.name, state="DISABLED")
    make_waiter(clients.batch.describe_job_queues, "jobQueues[].status", "VALID", "pathAny").wait(jobQueues=[args.name])
# NOTE(review): the lines below are the body of an ECR "ls" listing command whose
# "def" line is not visible in this chunk.
table = []
# Only filter by repository names when the user supplied some.
describe_repositories_args = dict(repositoryNames=args.repositories) if args.repositories else {}
for repo in paginate(clients.ecr.get_paginator("describe_repositories"), **describe_repositories_args):
    try:
        # Attach the repository access policy, if one is set.
        res = clients.ecr.get_repository_policy(repositoryName=repo["repositoryName"])
        repo["policy"] = json.loads(res["policyText"])
    except clients.ecr.exceptions.RepositoryPolicyNotFoundException:
        pass  # No policy on this repo; leave the "policy" key unset.
    orig_len = len(table)
    # One output row per image; each row also carries the repository metadata.
    for image in paginate(clients.ecr.get_paginator("describe_images"), repositoryName=repo["repositoryName"]):
        table.append(dict(image, **repo))
    if len(table) == orig_len:
        # Repository had no images: still emit a row for the repo itself.
        table.append(repo)
page_output(tabulate(table, args))

parser = register_listing_parser(ls, parent=ecr_parser, help="List ECR repos and images")
parser.add_argument("repositories", nargs="*")
def ecr_image_name_completer(**kwargs):
    """Argcomplete helper: lazily yield the names of all ECR repositories."""
    repos = paginate(clients.ecr.get_paginator("describe_repositories"))
    # Keep this a generator (matching the original) so pagination stays lazy.
    return (repo["repositoryName"] for repo in repos)
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys
from . import register_parser
from .ls import register_listing_parser
from .util.printing import page_output, tabulate
from .util.aws import ARN, resolve_instance_id, resources, clients
def alarms(args):
    """List all CloudWatch alarms in the current account/region."""
    all_alarms = resources.cloudwatch.alarms.all()
    page_output(tabulate(all_alarms, args))

parser = register_listing_parser(alarms, help="List CloudWatch alarms")
def put_alarm(args):
    """Create a CloudWatch alarm wired to an SNS email topic via a log metric filter.

    NOTE(review): this definition is truncated in this chunk — the
    put_metric_alarm call is cut off mid-argument-list.
    """
    sns = resources.sns
    logs = clients.logs
    cloudwatch = clients.cloudwatch
    # create_topic is idempotent: it returns the existing topic if one exists.
    topic = sns.create_topic(Name=args.alarm_name)
    topic.subscribe(Protocol="email", Endpoint=args.email)
    # Emit a metric value of 1 for every log line matching the filter pattern.
    logs.put_metric_filter(logGroupName=args.log_group_name,
                           filterName=args.alarm_name,
                           filterPattern=args.pattern,
                           metricTransformations=[dict(metricName=args.alarm_name,
                                                       metricNamespace=__name__,
                                                       metricValue="1")])
    cloudwatch.put_metric_alarm(AlarmName=args.alarm_name,
                                MetricName=args.alarm_name,
                                Namespace=__name__,
def secrets(args):
    """Entry point for the "secrets" command group: just prints subcommand help."""
    secrets_parser.print_help()

# Top-level parser for the "secrets" subcommands.
secrets_parser = register_parser(secrets, help="Manage application credentials (secrets)", description=__doc__)
def ls(args):
    """List Secrets Manager secrets (paginates list_secrets by hand: the
    pagination config is supplied explicitly rather than via the botocore model)."""
    config = dict(result_key="SecretList",
                  input_token="NextToken",
                  output_token="NextToken",
                  limit_key="MaxResults")
    paginator = Paginator(method=clients.secretsmanager.list_secrets,
                          pagination_config=config,
                          model=None)
    page_output(tabulate(paginate(paginator), args))

ls_parser = register_listing_parser(ls, parent=secrets_parser)
def put(args):
    """Create or update a secret from a generated SSH key, an environment
    variable named after the secret, or stdin.

    NOTE(review): this definition appears truncated — the body of the
    "if parse_principal(args):" branch is not visible in this chunk.
    """
    if args.generate_ssh_key:
        # Store a freshly generated private SSH key as the secret value.
        ssh_key = new_ssh_key()
        buf = StringIO()
        ssh_key.write_private_key(buf)
        secret_value = buf.getvalue()
    elif args.secret_name in os.environ:
        secret_value = os.environ[args.secret_name]
    else:
        secret_value = sys.stdin.read()
    try:
        res = clients.secretsmanager.create_secret(Name=args.secret_name, SecretString=secret_value)
    except clients.secretsmanager.exceptions.ResourceExistsException:
        # Secret already exists: store a new version of its value instead.
        res = clients.secretsmanager.put_secret_value(SecretId=args.secret_name, SecretString=secret_value)
    if parse_principal(args):
# NOTE(review): the lines below are the tail of an IAM "users" listing command
# whose opening lines are not visible in this chunk.
def describe_access_keys(cell):
    # Render a user's access keys as "KEYID: Active/Inactive, ...".
    return ", ".join([k.id + ": " + k.status for k in cell.all()])

users = list(resources.iam.users.all())
for user in users:
    # Pre-seed the computed columns so tabulate sees them on every row.
    user.cur, user.mfa = "", ""
cell_transforms = {
    "cur": mark_cur_user,
    "policies": get_policies_for_principal,
    "mfa": describe_mfa,
    "access_keys": describe_access_keys
}
page_output(tabulate(users, args, cell_transforms=cell_transforms))

parser = register_listing_parser(users, parent=iam_parser, help="List IAM users")
def groups(args):
    """List IAM groups, annotating each with its attached/inline policies."""
    transforms = {"policies": get_policies_for_principal}
    page_output(tabulate(resources.iam.groups.all(), args, cell_transforms=transforms))

parser = register_listing_parser(groups, parent=iam_parser, help="List IAM groups")
def roles(args):
    """List IAM roles, annotating each with its attached/inline policies."""
    transforms = {"policies": get_policies_for_principal}
    page_output(tabulate(resources.iam.roles.all(), args, cell_transforms=transforms))

parser = register_listing_parser(roles, parent=iam_parser, help="List IAM roles")
def policies(args):
    """List all IAM managed policies visible to this account."""
    all_policies = resources.iam.policies.all()
    page_output(tabulate(all_policies, args))

parser = register_listing_parser(policies, parent=iam_parser, help="List IAM policies")
def complete_cluster_name(**kwargs):
    """Argcomplete helper: short names of all ECS clusters (the part of the
    ARN resource after the first "/")."""
    cluster_arns = paginate(clients.ecs.get_paginator("list_clusters"))
    return [ARN(arn).resource.partition("/")[2] for arn in cluster_arns]
def ecs(args):
    """Entry point for the "ecs" command group: just prints subcommand help."""
    ecs_parser.print_help()

# Top-level parser for the "ecs" subcommands.
ecs_parser = register_parser(ecs, help="Manage Elastic Container Service resources", description=__doc__)
def clusters(args):
    """Describe the named ECS clusters, or every cluster when none are given."""
    if not args.clusters:
        args.clusters = list(paginate(clients.ecs.get_paginator("list_clusters")))
    descriptions = clients.ecs.describe_clusters(clusters=args.clusters)["clusters"]
    page_output(tabulate(descriptions, args))

parser = register_listing_parser(clusters, parent=ecs_parser, help="List ECS clusters")
parser.add_argument("clusters", nargs="*").completer = complete_cluster_name
def tasks(args):
    """List ECS tasks across clusters.

    NOTE(review): truncated in this chunk — the rest of the body is not visible.
    """
    list_clusters = clients.ecs.get_paginator("list_clusters")
    list_tasks = clients.ecs.get_paginator("list_tasks")

    def list_tasks_worker(worker_args):
        # Worker for one (cluster, desiredStatus) pair: returns the task ARNs.
        cluster, status = worker_args
        return cluster, status, list(paginate(list_tasks, cluster=cluster, desiredStatus=status))

    def describe_tasks_worker(t, cluster=None):
        # Empty task list would be an API error, so short-circuit to [].
        return clients.ecs.describe_tasks(cluster=cluster, tasks=t)["tasks"] if t else []

    task_descs = []
    if args.clusters is None:
        # When specific tasks were requested, default to this module's own cluster name.
        args.clusters = [__name__.replace(".", "_")] if args.tasks else list(paginate(list_clusters))
# NOTE(review): the lines below are the interior of an ELB "ls" listing command;
# its opening lines (classic-ELB loop header, dns_aliases/table setup) are not
# visible in this chunk.
row.update(alias=dns_aliases.get(row["DNSName"]), type="ELB")
# Filter: match either the load balancer name or its DNS alias (sans trailing dot).
if args.elbs and row["LoadBalancerName"] not in args.elbs and (row["alias"] or "").rstrip(".") not in args.elbs:
    continue
instances = clients.elb.describe_instance_health(LoadBalancerName=row["LoadBalancerName"])["InstanceStates"]
# One row per backing instance; bare row when the ELB has no instances.
table.extend([dict(row, **instance) for instance in instances] if instances else [row])
for row in paginate(clients.elbv2.get_paginator("describe_load_balancers")):
    row.update(alias=dns_aliases.get(row["DNSName"]), type="ALB")
    if args.elbs and row["LoadBalancerName"] not in args.elbs and (row["alias"] or "").rstrip(".") not in args.elbs:
        continue
    target_groups = clients.elbv2.describe_target_groups(LoadBalancerArn=row["LoadBalancerArn"])["TargetGroups"]
    for tg in target_groups:
        targets = get_targets(tg)
        # One row per target in each target group; bare row for empty groups.
        table.extend([dict(row, **target) for target in targets] if targets else [row])
page_output(tabulate(table, args, cell_transforms={"SecurityGroups": lambda x, r: ", ".join(map(sgid_to_name, x))}))

parser = register_listing_parser(ls, parent=elb_parser, help="List ELBs")
parser.add_argument("elbs", nargs="*")
def get_target_group(alb_name, target_group_name):
    """Return the ALB description merged with the named target group.

    Raises AegeaException when no target group of that name is attached to the ALB.
    """
    alb = clients.elbv2.describe_load_balancers(Names=[alb_name])["LoadBalancers"][0]
    target_groups = clients.elbv2.describe_target_groups(LoadBalancerArn=alb["LoadBalancerArn"])["TargetGroups"]
    match = next((tg for tg in target_groups if tg["TargetGroupName"] == target_group_name), None)
    if match is not None:
        return dict(alb, **match)
    m = "Target group {} not found in {} (target groups found: {})"
    raise AegeaException(m.format(target_group_name, alb_name, ", ".join(t["TargetGroupName"] for t in target_groups)))
def get_targets(target_group):
    """Return the health descriptions of every target registered in the group."""
    arn = target_group["TargetGroupArn"]
    return clients.elbv2.describe_target_health(TargetGroupArn=arn)["TargetHealthDescriptions"]
# NOTE(review): dangling definition — the body of register() is not visible in this chunk.
def register(args):
def _lambda(args):
    """Entry point for the "lambda" command group ("lambda" is a reserved word,
    hence the leading underscore); just prints subcommand help."""
    lambda_parser.print_help()

# Registered under the public command name "lambda".
lambda_parser = register_parser(_lambda, name="lambda")
def ls(args):
    """List AWS Lambda functions (getattr because "lambda" is a Python keyword)."""
    aws_lambda = getattr(clients, "lambda")
    rows = paginate(aws_lambda.get_paginator("list_functions"))
    page_output(tabulate(rows, args, cell_transforms={"LastModified": Timestamp}))

parser_ls = register_listing_parser(ls, parent=lambda_parser, help="List AWS Lambda functions")
def event_source_mappings(args):
    """List Lambda event source mappings in the current account/region."""
    aws_lambda = getattr(clients, "lambda")
    mappings = paginate(aws_lambda.get_paginator("list_event_source_mappings"))
    page_output(tabulate(mappings, args))

parser_event_source_mappings = register_listing_parser(event_source_mappings, parent=lambda_parser,
                                                       help="List event source mappings")
def update_code(args):
    """Upload a local zip file as the new code for a Lambda function.

    Returns the update_function_code API response after verifying that the
    SHA-256 reported by the service matches the locally computed digest.

    Raises:
        AssertionError: if the service-reported code hash does not match the payload.
    """
    with open(args.zip_file, "rb") as fh:
        payload = fh.read()
    payload_sha = hashlib.sha256(payload).digest()
    res = getattr(clients, "lambda").update_function_code(FunctionName=args.function_name, ZipFile=payload)
    # Explicit check instead of "assert": this integrity verification must not
    # be stripped when Python runs with -O. AssertionError is kept so any
    # existing callers catching it still work.
    if base64.b64decode(res["CodeSha256"]) != payload_sha:
        raise AssertionError("Code SHA-256 mismatch after updating Lambda function code")
    return res

update_code_parser = register_parser(update_code, parent=lambda_parser, help="Update function code")
update_code_parser.add_argument("function_name")
update_code_parser.add_argument("zip_file")
def update_config(args):
    """Update a Lambda function's configuration.

    NOTE(review): truncated in this chunk — only the initial kwargs dict is visible.
    """
    update_args = dict(FunctionName=args.function_name)
# NOTE(review): stray/duplicated registration — the "users" listing command it
# references is not defined immediately above in this chunk.
parser = register_listing_parser(users, parent=iam_parser, help="List IAM users")
def groups(args):
    """List IAM groups along with the policies attached to each."""
    group_rows = resources.iam.groups.all()
    page_output(tabulate(group_rows, args, cell_transforms={"policies": get_policies_for_principal}))

parser = register_listing_parser(groups, parent=iam_parser, help="List IAM groups")
def roles(args):
    """List IAM roles along with the policies attached to each."""
    role_rows = resources.iam.roles.all()
    page_output(tabulate(role_rows, args, cell_transforms={"policies": get_policies_for_principal}))

parser = register_listing_parser(roles, parent=iam_parser, help="List IAM roles")
def policies(args):
    """List all IAM managed policies visible to this account."""
    policy_rows = resources.iam.policies.all()
    page_output(tabulate(policy_rows, args))

parser = register_listing_parser(policies, parent=iam_parser, help="List IAM policies")
def generate_password(length=16):
    """Generate a random password of `length` alphanumeric characters with a
    literal "-" separator inserted at index 8 (total length is length + 1).

    Retries until the result contains at least one uppercase letter, one
    lowercase letter, and one digit. Uses the OS-backed SystemRandom CSPRNG,
    which is appropriate for security-sensitive values.

    :param length: number of random characters to draw (default 16).
    :return: the generated password string.
    """
    # Hoisted out of the loop: the original constructed a new SystemRandom for
    # every character drawn; one instance serves all draws identically.
    rng = random.SystemRandom()
    alphabet = string.ascii_letters + string.digits
    while True:
        password = [rng.choice(alphabet) for _ in range(length)]
        password.insert(8, "-")
        if not any(c in string.ascii_uppercase for c in password):
            continue
        if not any(c in string.ascii_lowercase for c in password):
            continue
        if not any(c in string.digits for c in password):
            continue
        return ''.join(password)
def create_user(args):
    """Create an IAM user.

    NOTE(review): truncated in this chunk — only the password-prompt setup is visible.
    """
    if args.prompt_for_password:
        # Deferred import: getpass is only needed for interactive prompting.
        from getpass import getpass
def efs(args):
    """Entry point for the "efs" command group: just prints subcommand help."""
    efs_parser.print_help()

# Top-level parser for the "efs" subcommands.
efs_parser = register_parser(efs, help="Manage Elastic Filesystem resources", description=__doc__)
def ls(args):
    """List EFS filesystems: one row per mount target, with the parent
    filesystem's fields merged into each row."""
    table = []
    for fs in clients.efs.describe_file_systems()["FileSystems"]:
        mount_targets = clients.efs.describe_mount_targets(FileSystemId=fs["FileSystemId"])["MountTargets"]
        for mt in mount_targets:
            # Filesystem fields take precedence over mount target fields on collision.
            table.append(dict(mt, **fs))
    args.columns += args.mount_target_columns
    page_output(tabulate(table, args, cell_transforms={"SizeInBytes": lambda x, r: x.get("Value") if x else None}))

parser = register_listing_parser(ls, parent=efs_parser, help="List EFS filesystems")
parser.add_argument("--mount-target-columns", nargs="+")
def create(args):
    """Create an EFS filesystem, ensuring an NFS-capable security group by default.

    NOTE(review): truncated in this chunk — the body continues past the lines
    visible here (the create_file_system call itself is not shown). The nesting
    of ensure_security_group under the "if" is reconstructed — confirm.
    """
    vpc = ensure_vpc()
    if args.security_groups is None:
        args.security_groups = [__name__]
        # Ensure a self-referencing security group allowing NFS traffic within itself.
        ensure_security_group(__name__, vpc, tcp_ingress=[dict(port=socket.getservbyname("nfs"),
                                                               source_security_group_name=__name__)])
    # Random creation token makes the create call idempotent per invocation.
    creation_token = base64.b64encode(bytearray(os.urandom(24))).decode()
    args.tags.append("Name=" + args.name)
    create_file_system_args = dict(CreationToken=creation_token,
                                   PerformanceMode=args.performance_mode,
                                   ThroughputMode=args.throughput_mode,
                                   Tags=encode_tags(args.tags))
    if args.throughput_mode == "provisioned":
        create_file_system_args.update(ProvisionedThroughputInMibps=args.provisioned_throughput_in_mibps)