Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
if "AvailabilityZone" not in create_args:
if args.attach:
create_args["AvailabilityZone"] = get_metadata("placement/availability-zone")
else:
create_args["AvailabilityZone"] = ensure_subnet(ensure_vpc()).availability_zone
res = clients.ec2.create_volume(**create_args)
clients.ec2.get_waiter("volume_available").wait(VolumeIds=[res["VolumeId"]])
if args.attach:
try:
attach(parser_attach.parse_args([res["VolumeId"]], namespace=args))
except Exception:
print(json.dumps(res, indent=2, default=lambda x: str(x)))
raise
return res
# CLI registration for "aegea ebs create". The add_argument order determines
# the order options appear in --help, so keep new options appended at the end.
parser_create = register_parser(create, parent=ebs_parser, help="Create an EBS volume")
parser_create.add_argument("--dry-run", action="store_true")
parser_create.add_argument("--snapshot-id")
parser_create.add_argument("--availability-zone")
parser_create.add_argument("--kms-key-id")
parser_create.add_argument("--tags", nargs="+", metavar="TAG_NAME=VALUE")
parser_create.add_argument("--attach", action="store_true",
                           help="Attach volume to this instance (only valid when running on EC2)")
def snapshot(args):
    """Create a point-in-time EBS snapshot of the given volume."""
    request = dict(DryRun=args.dry_run, VolumeId=args.volume_id)
    return clients.ec2.create_snapshot(**request)
# Register "snapshot" under the EBS subcommand group; tab completion of the
# positional volume ID is supplied by complete_volume_id.
parser_snapshot = register_parser(snapshot, parent=ebs_parser, help="Create an EBS snapshot")
parser_snapshot.add_argument("volume_id").completer = complete_volume_id
def attach_volume(args):
return clients.ec2.attach_volume(DryRun=args.dry_run,
VolumeId=args.volume_id,
table = []
rrs_cols = ["Name", "Type", "TTL"]
record_cols = ["Value"]
for zone in paginate(clients.route53.get_paginator("list_hosted_zones")):
if args.zones and zone["Name"] not in args.zones + [z + "." for z in args.zones]:
continue
for rrs in paginate(clients.route53.get_paginator("list_resource_record_sets"), HostedZoneId=zone["Id"]):
for record in rrs.get("ResourceRecords", [rrs.get("AliasTarget", {})]):
row = [rrs.get(f) for f in rrs_cols]
row += [record.get(f, record.get("DNSName")) for f in record_cols]
row += [get_field(zone, "Config.PrivateZone"), zone["Id"].rpartition("/")[-1]]
table.append(row)
column_names = rrs_cols + record_cols + ["Private", "Id"]
page_output(format_table(table, column_names=column_names, max_col_width=args.max_col_width))
# Register "ls" under the zones subcommand group.
parser = register_parser(ls, parent=zones_parser, help="List Route53 DNS zones and records")
parser.add_argument("zones", nargs="*")  # optional zone names to filter the listing
def update(args):
    """Update Route53 records in the given zone.

    args.updates is a list of (name, value) pairs; zip(*...) regroups them
    into parallel name and value sequences for DNSZone.update().
    """
    names_and_values = zip(*args.updates)
    return DNSZone(args.zone).update(*names_and_values, record_type=args.record_type)
# Register "update" under the zones subcommand group.
parser = register_parser(update, parent=zones_parser, help="Update Route53 DNS records")
parser.add_argument("zone")
# Split on the first "=" only, so record values may themselves contain "=".
parser.add_argument("updates", nargs="+", metavar="NAME=VALUE", type=lambda x: x.split("=", 1))
parser.add_argument("--record-type", default="CNAME")
def delete(args):
    """Delete a Route53 record, raising an error if it does not exist."""
    zone = DNSZone(args.zone)
    return zone.delete(name=args.name, record_type=args.record_type, missing_ok=False)
# Register "delete" under the zones subcommand group.
parser = register_parser(delete, parent=zones_parser, help="Delete Route53 DNS records")
parser.add_argument("zone")
# Route53 represents "*" in record names with the octal escape \052.
parser.add_argument("name", help=r'Enter a "\052" literal to represent a wildcard.')
MasterUsername=args.master_username or getpass.getuser(),
MasterUserPassword=args.master_user_password,
VpcSecurityGroupIds=args.security_groups,
DBInstanceClass=args.db_instance_class,
Tags=encode_tags(args.tags),
CopyTagsToSnapshot=True)
if args.db_name:
create_args.update(DBName=args.db_name)
if args.engine_version:
create_args.update(EngineVersion=args.engine_version)
clients.rds.create_db_instance(**create_args)
clients.rds.get_waiter("db_instance_available").wait(DBInstanceIdentifier=args.name)
instance = clients.rds.describe_db_instances(DBInstanceIdentifier=args.name)["DBInstances"][0]
return {k: instance[k] for k in ("Endpoint", "DbiResourceId", "DBInstanceStatus")}
# CLI registration for "aegea rds create".
parser = register_parser(create, parent=rds_parser, help="Create an RDS instance")
parser.add_argument("name")
parser.add_argument("--db-name")
parser.add_argument("--engine")
parser.add_argument("--engine-version")
parser.add_argument("--storage", type=int)  # presumably allocated storage size — confirm units against create()
parser.add_argument("--storage-type")
# Both long and short spellings are accepted for the credential options.
parser.add_argument("--master-username", "--username")
parser.add_argument("--master-user-password", "--password", required=True)
parser.add_argument("--db-instance-class")
parser.add_argument("--tags", nargs="+", default=[], metavar="TAG_NAME=VALUE")
parser.add_argument("--security-groups", nargs="+", default=[])
def delete(args):
    """Delete an RDS instance immediately, skipping the final snapshot."""
    request = dict(DBInstanceIdentifier=args.name, SkipFinalSnapshot=True)
    clients.rds.delete_db_instance(**request)
# Register "delete" under the RDS subcommand group.
parser = register_parser(delete, parent=rds_parser, help="Delete an RDS instance")
tags = dict(tag.split("=", 1) for tag in args.tags)
add_tags(instance, Name=args.hostname, Owner=ARN.get_iam_username(),
SSHHostPublicKeyPart1=hkl[:255], SSHHostPublicKeyPart2=hkl[255:],
OwnerSSHKeyName=ssh_key_name, **tags)
if args.use_dns:
dns_zone.update(args.hostname, instance.private_dns_name)
while not instance.public_dns_name:
instance = resources.ec2.Instance(instance.id)
time.sleep(1)
add_ssh_host_key_to_known_hosts(hostkey_line([instance.public_dns_name], ssh_host_key))
if args.wait_for_ssh:
wait_for_port(instance.public_dns_name, 22)
logger.info("Launched %s in %s using %s", instance, subnet, args.ami)
return dict(instance_id=instance.id)
# CLI registration for "aegea launch".
parser = register_parser(launch)
parser.add_argument("hostname")
parser.add_argument("--commands", nargs="+", metavar="COMMAND", help="Commands to run on host upon startup")
parser.add_argument("--packages", nargs="+", metavar="PACKAGE", help="APT packages to install on host upon startup")
parser.add_argument("--ssh-key-name")
# Negative flag: passing it stores False into args.verify_ssh_key_pem_file.
parser.add_argument("--no-verify-ssh-key-pem-file", dest="verify_ssh_key_pem_file", action="store_false")
parser.add_argument("--bless-config", default=os.environ.get("BLESS_CONFIG"),
                    help="Path to a Bless configuration file (or pass via the BLESS_CONFIG environment variable)")
# The default AMI resolution strategy is described by resolve_ami's docstring.
parser.add_argument("--ami", help="AMI to use for the instance. Default: " + resolve_ami.__doc__)
parser.add_argument("--ami-tags", nargs="+", metavar="NAME=VALUE", help="Use the most recent AMI with these tags")
parser.add_argument("--spot", action="store_true",
                    help="Launch a preemptible spot instance, which is cheaper but could be forced to shut down")
parser.add_argument("--duration-hours", type=float, help="Terminate the spot instance after this number of hours")
parser.add_argument("--cores", type=int, help="Minimum number of cores to request (spot fleet API)")
parser.add_argument("--min-mem-per-core-gb", type=float)
parser.add_argument("--instance-type", "-t").completer = instance_type_completer
parser.add_argument("--spot-price", type=float,
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, argparse
from . import register_parser
from .util import paginate
from .util.aws import clients
from .util.aws.dns import DNSZone
from .util.printing import get_field, page_output, format_table
def zones(args):
    """Entry point for the bare "zones" command group: show usage."""
    return zones_parser.print_help()
# Register the "zones" command group; the description comes from the module docstring.
zones_parser = register_parser(zones, help="Manage Route53 DNS zones", description=__doc__)
def ls(args):
    """List Route53 records across hosted zones, optionally filtered by zone name."""
    rrs_fields = ["Name", "Type", "TTL"]
    record_fields = ["Value"]
    rows = []
    for zone in paginate(clients.route53.get_paginator("list_hosted_zones")):
        if args.zones:
            # Route53 zone names carry a trailing dot; accept either spelling.
            accepted = args.zones + [name + "." for name in args.zones]
            if zone["Name"] not in accepted:
                continue
        zone_id = zone["Id"].rpartition("/")[-1]
        is_private = get_field(zone, "Config.PrivateZone")
        rrs_paginator = clients.route53.get_paginator("list_resource_record_sets")
        for rrs in paginate(rrs_paginator, HostedZoneId=zone["Id"]):
            # Alias record sets have no ResourceRecords; fall back to the AliasTarget.
            for record in rrs.get("ResourceRecords", [rrs.get("AliasTarget", {})]):
                row = [rrs.get(field) for field in rrs_fields]
                row.extend(record.get(field, record.get("DNSName")) for field in record_fields)
                row.extend([is_private, zone_id])
                rows.append(row)
    headers = rrs_fields + record_fields + ["Private", "Id"]
    page_output(format_table(rows, column_names=headers, max_col_width=args.max_col_width))
auditor = Auditor()
auditor.__dict__.update(vars(args))
table = []
for method_name in natural_sort(dir(auditor)):
if method_name.startswith("audit"):
method = getattr(auditor, method_name)
try:
method()
table.append([GREEN("PASS"), method.__doc__])
except Exception as e:
logger.warn("%s: %s", method, e)
table.append([RED("FAIL"), method.__doc__])
# TODO: WHITE("NO TEST")
page_output(format_table(table, column_names=["Result", "Test"], max_col_width=120))
# Register "audit" as a top-level command.
parser = register_parser(audit, help='Generate a security report using the CIS AWS Foundations Benchmark')
parser.add_argument('--email', help="Administrative contact email")
import os, sys, json, zipfile, gzip, csv, io, argparse
from io import BytesIO, TextIOWrapper
from datetime import datetime, timedelta
import dateutil
from botocore.exceptions import ClientError
from . import register_parser, config
from .util.exceptions import AegeaException
from .util.printing import format_table, page_output, get_field, get_cell, tabulate
from .util.aws import ARN, clients, resources, ensure_s3_bucket, IAMPolicyBuilder
def billing(args):
    """Entry point for the bare "billing" command group: show usage."""
    return billing_parser.print_help()
# Register the "billing" command group; the description comes from the module docstring.
billing_parser = register_parser(billing, help="Configure and view AWS cost and usage reports", description=__doc__)
def configure(args):
bucket_name = args.billing_reports_bucket.format(account_id=ARN.get_account_id())
bucket_policy = IAMPolicyBuilder(principal="arn:aws:iam::386209384616:root",
action=["s3:GetBucketAcl", "s3:GetBucketPolicy"],
resource="arn:aws:s3:::{}".format(bucket_name))
bucket_policy.add_statement(principal="arn:aws:iam::386209384616:root",
action=["s3:PutObject"],
resource="arn:aws:s3:::{}/*".format(bucket_name))
bucket = ensure_s3_bucket(bucket_name, policy=bucket_policy)
try:
clients.cur.put_report_definition(ReportDefinition=dict(ReportName=__name__,
TimeUnit="HOURLY",
Format="textORcsv",
Compression="GZIP",
S3Bucket=bucket.name,
num_amis = len(list(session.resource("ec2").images.filter(Owners=["self"])))
num_vpcs = len(list(session.resource("ec2").vpcs.all()))
num_enis = len(list(session.resource("ec2").network_interfaces.all()))
num_volumes = len(list(session.resource("ec2").volumes.all()))
except botocore.exceptions.ClientError:
num_instances, num_amis, num_vpcs, num_enis, num_volumes = ["Access denied"] * 5
return [region, num_instances, num_amis, num_vpcs, num_enis, num_volumes]
def top(args):
    """Show a per-region overview table of AWS resource counts.

    Fans out one get_stats_for_region call per available EC2 region on a
    thread pool (the work is network-bound), then renders the results.
    """
    columns = ["Region", "Instances", "AMIs", "VPCs", "Network interfaces", "EBS volumes"]
    # Fixes from review: the original assigned an unused empty list to "table"
    # before overwriting it, and never shut the executor down. The context
    # manager joins worker threads even if a region lookup raises.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        table = list(executor.map(get_stats_for_region, boto3.Session().get_available_regions("ec2")))
    page_output(format_table(table, column_names=columns, max_col_width=args.max_col_width))
# Register "top" as a top-level command.
parser = register_parser(top, help='Show an overview of AWS resources per region')
add_ssh_key_to_agent(get_instance(hostname).key_name)
if not username:
username = get_linux_username()
return [], username + "@" + resolve_instance_public_dns(hostname)
def ssh(args):
    """Connect to an EC2 instance by exec'ing ssh over the current process."""
    # Keepalive options guard the session against idle NAT/firewall timeouts.
    keepalive_opts = ["-o", "ServerAliveInterval={}".format(args.server_alive_interval),
                      "-o", "ServerAliveCountMax={}".format(args.server_alive_count_max)]
    ssh_opts = keepalive_opts + extract_passthrough_opts(args, "ssh")
    # args.name may be "user@instance" or just "instance"; rpartition yields
    # an empty username in the latter case.
    username, _, instance_name = args.name.rpartition("@")
    host_opts, hostname = prepare_ssh_host_opts(username=username, hostname=instance_name,
                                                bless_config_filename=args.bless_config,
                                                use_kms_auth=args.use_kms_auth)
    argv = ["ssh"] + ssh_opts + host_opts + [hostname] + args.ssh_args
    # execvp does not return: the ssh process replaces this one.
    os.execvp("ssh", argv)
# CLI registration for "aegea ssh".
ssh_parser = register_parser(ssh, help="Connect to an EC2 instance", description=__doc__)
ssh_parser.add_argument("name")
# REMAINDER collects everything after "name" verbatim for pass-through to ssh.
ssh_parser.add_argument("ssh_args", nargs=argparse.REMAINDER,
                        help="Arguments to pass to ssh; please see " + BOLD("man ssh") + " for details")
# SUPPRESS hides these options from --help output.
ssh_parser.add_argument("--server-alive-interval", help=argparse.SUPPRESS)
ssh_parser.add_argument("--server-alive-count-max", help=argparse.SUPPRESS)
add_bless_and_passthrough_opts(ssh_parser, "ssh")
def scp(args):
"""
Transfer files to or from EC2 instance.
"""
scp_opts, host_opts = extract_passthrough_opts(args, "scp"), []
user_or_hostname_chars = string.ascii_letters + string.digits
for i, arg in enumerate(args.scp_args):
if arg[0] in user_or_hostname_chars and ":" in arg:
hostname, colon, path = arg.partition(":")
To delete EFS filesystems, use ``aegea rm``.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, argparse, base64, socket
from . import register_parser
from .ls import register_listing_parser
from .util.printing import page_output, tabulate
from .util.aws import clients, ensure_vpc, encode_tags, make_waiter, ensure_security_group, resolve_security_group
def efs(args):
    """Entry point for the bare "efs" command group: show usage."""
    return efs_parser.print_help()
# Register the "efs" command group; the description comes from the module docstring.
efs_parser = register_parser(efs, help="Manage Elastic Filesystem resources", description=__doc__)
def ls(args):
    """List EFS filesystems, one table row per mount target.

    Each row is the mount target dict merged with its parent filesystem's
    attributes, so both sets of columns are available to tabulate().
    """
    table = []
    for filesystem in clients.efs.describe_file_systems()["FileSystems"]:
        mount_targets = clients.efs.describe_mount_targets(FileSystemId=filesystem["FileSystemId"])["MountTargets"]
        for mount_target in mount_targets:
            mount_target.update(filesystem)
            table.append(mount_target)
    # Fix: --mount-target-columns has no argparse default, so omitting the flag
    # left it as None and "+= None" raised TypeError; treat None as empty.
    args.columns += args.mount_target_columns or []
    # SizeInBytes is a dict like {"Value": N, ...}; render just the number.
    page_output(tabulate(table, args, cell_transforms={"SizeInBytes": lambda x, r: x.get("Value") if x else None}))
# Listing-style registration; register_listing_parser presumably adds common
# output options such as --columns and --max-col-width — defined elsewhere.
parser = register_listing_parser(ls, parent=efs_parser, help="List EFS filesystems")
parser.add_argument("--mount-target-columns", nargs="+")
def create(args):
vpc = ensure_vpc()
if args.security_groups is None: