Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
" service sshd reload)")
if provision_users:
# TODO: UIDs should be deterministic
# uid_bytes = hashlib.sha256(username.encode()).digest()[-2:]
# uid = 2000 + (int.from_bytes(uid_bytes, byteorder=sys.byteorder) // 2)
cloud_config_data["users"] = [dict(name=u, gecos="", sudo="ALL=(ALL) NOPASSWD:ALL") for u in provision_users]
for key in sorted(kwargs):
cloud_config_data[key] = kwargs[key]
if host_key is not None:
buf = StringIO()
host_key.write_private_key(buf)
cloud_config_data["ssh_keys"] = dict(rsa_private=buf.getvalue(),
rsa_public=get_public_key_from_pair(host_key))
payload = encode_cloud_config_payload(cloud_config_data, mime_multipart_archive=mime_multipart_archive)
if len(payload) >= 16384:
logger.warn("Cloud-init payload is too large to be passed in user data, extracting rootfs.skel")
upload_bootstrap_asset(cloud_config_data, rootfs_skel_dirs)
payload = encode_cloud_config_payload(cloud_config_data, mime_multipart_archive=mime_multipart_archive)
return payload
def attach(args):
    """Attach an EBS volume to an EC2 instance, scanning for a free device node.

    Falls back to the current instance's ID (from instance metadata) when no
    instance is given. If no device is specified, candidate block device
    mapping nodes are tried from the end of the alphabet until one is free.

    NOTE(review): a second, longer definition of ``attach`` appears later in
    this file and will shadow this one at import time — confirm which copy is
    intended to survive.
    """
    if args.instance is None:
        args.instance = get_metadata("instance-id")
    # Scan xvdz..xvdb (reverse alphabetical) to avoid clashing with root/ephemeral devices
    devices = args.device if args.device else ["xvd" + chr(i + 1) for i in reversed(range(ord("a"), ord("z")))]
    for i, device in enumerate(devices):
        try:
            args.device = device
            res = attach_volume(args)
            break
        except ClientError as e:
            if re.search("VolumeInUse.+already attached to an instance", str(e)):
                if resources.ec2.Volume(args.volume_id).attachments[0]["InstanceId"] == args.instance:
                    logger.warning("Volume %s is already attached to instance %s", args.volume_id, args.instance)
                    break
            if i + 1 < len(devices) and re.search("InvalidParameterValue.+Attachment point.+is already in use", str(e)):
                logger.warning("BDM node %s is already in use, looking for next available node", device)
                continue
            raise
    res = clients.ec2.get_waiter("volume_in_use").wait(VolumeIds=[args.volume_id])
    if args.format or args.mount:
        # Poll for up to 30 seconds for the kernel to expose the device node
        for i in range(30):
            try:
                find_devnode(args.volume_id)
                break
            except Exception:
                logger.debug("Waiting for device node to appear for %s", args.volume_id)
                time.sleep(1)
    if args.format:
        logger.info("Formatting %s (%s)", args.volume_id, find_devnode(args.volume_id))
def audit(args):
    """Run all Auditor checks and page a PASS/FAIL report to the user.

    Every attribute of ``Auditor`` whose name starts with "audit" is treated
    as a check; its docstring is used as the test description. A check passes
    when it returns without raising.
    """
    auditor = Auditor()
    # Copy parsed CLI options onto the auditor so checks can read them as attributes
    auditor.__dict__.update(vars(args))
    table = []
    for method_name in natural_sort(dir(auditor)):
        if method_name.startswith("audit"):
            method = getattr(auditor, method_name)
            try:
                method()
                table.append([GREEN("PASS"), method.__doc__])
            except Exception as e:
                # Log the failure but keep running the remaining checks
                logger.warning("%s: %s", method, e)
                table.append([RED("FAIL"), method.__doc__])
    # TODO: WHITE("NO TEST")
    page_output(format_table(table, column_names=["Result", "Test"], max_col_width=120))
def attach(args):
    """Attach an EBS volume to an EC2 instance, then optionally format it.

    Falls back to the current instance's ID (from instance metadata) when no
    instance is given. If no device is specified, candidate block device
    mapping nodes are tried from the end of the alphabet until one is free.
    After attachment, waits for the volume to enter the "in-use" state and
    (when --format/--mount is set) for the device node to appear.
    """
    if args.instance is None:
        args.instance = get_metadata("instance-id")
    # Scan xvdz..xvdb (reverse alphabetical) to avoid clashing with root/ephemeral devices
    devices = args.device if args.device else ["xvd" + chr(i + 1) for i in reversed(range(ord("a"), ord("z")))]
    for i, device in enumerate(devices):
        try:
            args.device = device
            res = attach_volume(args)
            break
        except ClientError as e:
            if re.search("VolumeInUse.+already attached to an instance", str(e)):
                if resources.ec2.Volume(args.volume_id).attachments[0]["InstanceId"] == args.instance:
                    logger.warning("Volume %s is already attached to instance %s", args.volume_id, args.instance)
                    break
            if i + 1 < len(devices) and re.search("InvalidParameterValue.+Attachment point.+is already in use", str(e)):
                logger.warning("BDM node %s is already in use, looking for next available node", device)
                continue
            raise
    res = clients.ec2.get_waiter("volume_in_use").wait(VolumeIds=[args.volume_id])
    if args.format or args.mount:
        # Poll for up to 30 seconds for the kernel to expose the device node
        for i in range(30):
            try:
                find_devnode(args.volume_id)
                break
            except Exception:
                logger.debug("Waiting for device node to appear for %s", args.volume_id)
                time.sleep(1)
    if args.format:
        logger.info("Formatting %s (%s)", args.volume_id, find_devnode(args.volume_id))
        label = get_fs_label(args.volume_id)
        # NOTE(review): shell=True with a string-concatenated command. label/devnode are
        # derived from AWS metadata, but an argv list with shell=False would be safer.
        command = get_mkfs_command(fs_type=args.format, label=label) + find_devnode(args.volume_id)
        subprocess.check_call(command, shell=True, stdout=sys.stderr.buffer)
def launch(args):
    """Launch an EC2 instance (on-demand or spot) from parsed CLI arguments.

    NOTE(review): this listing appears to interleave code from more than one
    function — the names ``bless_config_filename``, ``use_kms_auth``,
    ``hostname``, ``username``, and ``launch_spec`` are never bound in this
    scope, and the bastion/SSH resolution section (with its own returns) sits
    in the middle of launch setup. Verify against the upstream source before
    trusting this body.
    """
    # Any spot-fleet-style option implies a spot launch
    if args.spot_price or args.duration_hours or args.cores or args.min_mem_per_core_gb:
        args.spot = True
    if args.use_dns:
        dns_zone = DNSZone()
    ssh_key_name = ensure_ssh_key(name=args.ssh_key_name, base_name=__name__,
                                  verify_pem_file=args.verify_ssh_key_pem_file)
    # TODO: move all account init checks into init helper with region-specific semaphore on s3
    try:
        ensure_log_group("syslog")
    except ClientError:
        # Best-effort: proceed even if CloudWatch Logs setup fails
        logger.warn("Unable to query or create cloudwatch syslog group. Logs may be undeliverable")
    try:
        # If the hostname already resolves to an instance, refuse to reuse it
        i = resolve_instance_id(args.hostname)
        msg = "The hostname {} is being used by {} (state: {})"
        raise Exception(msg.format(args.hostname, i, resources.ec2.Instance(i).state["Name"]))
    except AegeaException:
        # AegeaException here means the hostname is unused — validate and continue
        validate_hostname(args.hostname)
        assert not args.hostname.startswith("i-")
    # --ami-tags entries are "key=value" strings
    ami_tags = dict(tag.split("=", 1) for tag in args.ami_tags or [])
    args.ami = resolve_ami(args.ami, **ami_tags)
    if args.subnet:
        subnet = resources.ec2.Subnet(args.subnet)
        vpc = resources.ec2.Vpc(subnet.vpc_id)
    else:
        vpc = ensure_vpc()
        subnet = ensure_subnet(vpc, availability_zone=args.availability_zone)
    if not subnet.map_public_ip_on_launch:
        # NOTE(review): section below looks spliced in from an SSH/bastion helper —
        # bless_config_filename, use_kms_auth, hostname, username are unbound here
        if bless_config_filename:
            with open(bless_config_filename) as fh:
                bless_config = yaml.safe_load(fh)
            ensure_bless_ssh_cert(ssh_key_name=ssh_key_name,
                                  bless_config=bless_config,
                                  use_kms_auth=use_kms_auth)
            add_ssh_key_to_agent(ssh_key_name)
            instance = get_instance(hostname)
            if not username:
                username = bless_config["client_config"]["remote_users"][0]
            bastion_config = match_instance_to_bastion(instance=instance, bastions=bless_config["ssh_config"]["bastions"])
            if bastion_config:
                # Route the connection through the matched bastion via ProxyJump
                jump_host = bastion_config["user"] + "@" + bastion_config["pattern"]
                return ["-o", "ProxyJump=" + jump_host], username + "@" + instance.private_ip_address
            elif instance.public_dns_name:
                logger.warn("No bastion host found for %s, trying direct connection", instance.private_ip_address)
                return [], username + "@" + instance.public_dns_name
            else:
                raise AegeaException("No bastion host or public route found for {}".format(instance))
        else:
            if get_instance(hostname).key_name is not None:
                add_ssh_key_to_agent(get_instance(hostname).key_name)
            if not username:
                username = get_linux_username()
            return [], username + "@" + resolve_instance_public_dns(hostname)
    if args.client_token is None:
        # Idempotency token tied to the IAM user so retries don't double-launch
        args.client_token = get_client_token(ARN.get_iam_username(), __name__)
    try:
        if args.spot:
            # Spot launch specs require user data to be base64-encoded
            launch_spec["UserData"] = base64.b64encode(launch_spec["UserData"]).decode()
            if args.duration_hours or args.cores or args.min_mem_per_core_gb:
                spot_fleet_args = dict(launch_spec=launch_spec, client_token=args.client_token)
                for arg in "cores", "min_mem_per_core_gb", "spot_price", "duration_hours", "dry_run":
                    if getattr(args, arg, None):
                        spot_fleet_args[arg] = getattr(args, arg)
                if "cores" in spot_fleet_args:
                    spot_fleet_args["min_cores_per_instance"] = spot_fleet_args["cores"]
                if args.instance_type != parser.get_default("instance_type"):
                    msg = ("Using --instance-type with spot fleet may unnecessarily constrain available instances. "
                           "Consider using --cores and --min-mem-per-core-gb instead")
                    logger.warn(msg)

                    # Pin the fleet to exactly the user-requested instance type
                    class InstanceSpotFleetBuilder(SpotFleetBuilder):
                        def instance_types(self, **kwargs):
                            yield args.instance_type, 1
                    spot_fleet_builder = InstanceSpotFleetBuilder(**spot_fleet_args)
                else:
                    spot_fleet_builder = SpotFleetBuilder(**spot_fleet_args)
                logger.info("Launching {}".format(spot_fleet_builder))
                sfr_id = spot_fleet_builder()
                instances = []
                # Poll until the spot fleet reports at least one active instance
                while not instances:
                    res = clients.ec2.describe_spot_fleet_instances(SpotFleetRequestId=sfr_id)
                    instances = res["ActiveInstances"]
                    time.sleep(0 if instances else 1)
                # FIXME: there may be multiple instances, and spot fleet provides no indication of whether the SFR is
                # fulfilled
def add_ssh_key_to_agent(name):
    """Load the named SSH private key into the local ssh-agent (best-effort).

    Failures are logged rather than raised so the caller can still attempt a
    connection with whatever keys the agent already holds.
    """
    try:
        # 5s timeout guards against ssh-add blocking indefinitely (e.g. passphrase prompt)
        subprocess.check_call(["ssh-add", get_ssh_key_path(name)], timeout=5)
    except Exception as e:
        logger.warning("Failed to add %s to SSH keychain: %s. Connections may fail", get_ssh_key_path(name), e)