How to use the aegea.logger.debug function in aegea

To help you get started, we've selected a few examples of how aegea's logger.debug is used, drawn from the public aegea codebase on GitHub.

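In these examples, logger is aegea's module-level logging.Logger; messages passed to logger.debug are only emitted when DEBUG-level logging is enabled. A minimal sketch using just the standard library, assuming the logger is named after the package (an assumption, not something shown in the examples below):

import logging

# Show DEBUG-level records on stderr.
logging.basicConfig(level=logging.DEBUG)

# Assumption: aegea's logger is created with logging.getLogger("aegea") or a child of it.
logging.getLogger("aegea").setLevel(logging.DEBUG)
logging.getLogger("aegea").debug("debug output is now visible")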

Example from github.com/kislyuk/aegea: aegea/ebs.py
            if re.search("VolumeInUse.+already attached to an instance", str(e)):
                if resources.ec2.Volume(args.volume_id).attachments[0]["InstanceId"] == args.instance:
                    logger.warn("Volume %s is already attached to instance %s", args.volume_id, args.instance)
                    break
            if i + 1 < len(devices) and re.search("InvalidParameterValue.+Attachment point.+is already in use", str(e)):
                logger.warn("BDM node %s is already in use, looking for next available node", devices[i])
                continue
            raise
    res = clients.ec2.get_waiter("volume_in_use").wait(VolumeIds=[args.volume_id])
    if args.format or args.mount:
        for i in range(30):
            try:
                find_devnode(args.volume_id)
                break
            except Exception:
                logger.debug("Waiting for device node to appear for %s", args.volume_id)
                time.sleep(1)
    if args.format:
        logger.info("Formatting %s (%s)", args.volume_id, find_devnode(args.volume_id))
        label = get_fs_label(args.volume_id)
        command = get_mkfs_command(fs_type=args.format, label=label) + find_devnode(args.volume_id)
        subprocess.check_call(command, shell=True, stdout=sys.stderr.buffer)
    if args.mount:
        logger.info("Mounting %s at %s", args.volume_id, args.mount)
        subprocess.check_call(["mount", find_devnode(args.volume_id), args.mount], stdout=sys.stderr.buffer)
    return res
parser_attach = register_parser(attach, parent=ebs_parser, help="Attach an EBS volume to an EC2 instance")
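
In the excerpt above, logger.debug records each retry while waiting for the device node to appear, keeping the noise out of normal output. A standalone sketch of the same poll-and-log pattern; wait_for and check_ready are hypothetical names, not aegea APIs:

import logging
import time

logger = logging.getLogger(__name__)

def wait_for(check_ready, attempts=30, delay=1):
    # Retry a readiness check, logging each failed attempt at DEBUG level.
    for attempt in range(attempts):
        try:
            return check_ready()
        except Exception:
            logger.debug("Attempt %d: resource not ready yet, retrying", attempt + 1)
            time.sleep(delay)
    raise TimeoutError("resource did not become ready in time")
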
Example from github.com/kislyuk/aegea: aegea/util/cloudinit.py
    manifest = OrderedDict()
    aegea_conf = os.getenv("AEGEA_CONFIG_FILE")
    targz = io.BytesIO()
    tar = tarfile.open(mode="w:gz", fileobj=targz) if dest == "tarfile" else None

    for rootfs_skel_dir in rootfs_skel_dirs:
        if rootfs_skel_dir == "auto":
            fn = os.path.join(os.path.dirname(__file__), "..", "rootfs.skel")
        elif aegea_conf:
            # FIXME: not compatible with colon-separated AEGEA_CONFIG_FILE
            fn = os.path.join(os.path.dirname(aegea_conf), rootfs_skel_dir)
        elif os.path.exists(rootfs_skel_dir):
            fn = os.path.abspath(os.path.normpath(rootfs_skel_dir))
        else:
            raise Exception("rootfs_skel directory {} not found".format(fn))
        logger.debug("Trying rootfs.skel: %s" % fn)
        if not os.path.exists(fn):
            raise Exception("rootfs_skel directory {} not found".format(fn))
        for root, dirs, files in os.walk(fn):
            for file_ in files:
                path = os.path.join("/", os.path.relpath(root, fn), file_)
                if dest == "cloudinit":
                    add_file_to_cloudinit_manifest(os.path.join(root, file_), path, manifest)
                elif dest == "tarfile":
                    tar.add(os.path.join(root, file_), path)

    if dest == "cloudinit":
        return list(manifest.values())
    elif dest == "tarfile":
        tar.close()
        return targz.getvalue()
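
Note that logger.debug("Trying rootfs.skel: %s" % fn) above formats the message eagerly with the % operator, whereas most of the other examples on this page pass the arguments to logger.debug itself, deferring formatting until a DEBUG record is actually emitted. A small illustration of the difference (the value of fn is made up):

import logging

logger = logging.getLogger(__name__)
fn = "/etc/aegea/rootfs.skel"  # hypothetical value for illustration

# Eager: the message string is built even if DEBUG logging is disabled.
logger.debug("Trying rootfs.skel: %s" % fn)

# Lazy: the logging module formats the message only when the record is emitted.
logger.debug("Trying rootfs.skel: %s", fn)
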
Example from github.com/kislyuk/aegea: aegea/util/aws/spot.py
    def __call__(self, **kwargs):
        self.spot_fleet_request_config["LaunchSpecifications"] = list(self.launch_specs())
        logger.debug(self.spot_fleet_request_config)
        res = clients.ec2.request_spot_fleet(DryRun=self.dry_run,
                                             SpotFleetRequestConfig=self.spot_fleet_request_config,
                                             **kwargs)
        return res["SpotFleetRequestId"]
Example from github.com/kislyuk/aegea: aegea/util/aws/logs.py
    policy.add_statement(action="s3:PutObject", resource=str(bucket_arn) + "/*", principal=logs_principal)
    lifecycle = S3BucketLifecycleBuilder(expiration=dict(Days=30))
    lifecycle.add_rule(abort_incomplete_multipart_upload=20)
    bucket = ensure_s3_bucket(bucket_name, policy=policy, lifecycle=lifecycle)
    if not args.end_time:
        args.end_time = Timestamp.match_precision(Timestamp("-0s"), args.start_time)
    export_task_args = dict(logGroupName=args.log_group,
                            fromTime=int(timestamp(args.start_time) * 1000),
                            to=int(timestamp(args.end_time) * 1000),
                            destination=bucket.name)
    if args.log_stream:
        export_task_args.update(logStreamNamePrefix=args.log_stream)
    cache_key = hashlib.sha256(json.dumps(export_task_args, sort_keys=True).encode()).hexdigest()[:32]
    export_task_args.update(destinationPrefix=cache_key)
    for log_object in bucket.objects.filter(Prefix=cache_key):
        logger.debug("Reusing completed export task %s", log_object.key)
        break
    else:
        logger.debug("Starting new log export task %s", export_task_args)
        task_desc = clients.logs.create_export_task(**export_task_args)
        try:
            while task_desc.get("status", {}).get("code") != "COMPLETED":
                res = clients.logs.describe_export_tasks(taskId=task_desc["taskId"])
                assert len(res["exportTasks"]) == 1
                task_desc = res["exportTasks"][0]
                if task_desc["status"]["code"] in {"CANCELLED", "FAILED"}:
                    raise Exception("Log export task failed: " + task_desc["status"]["message"])
                msg = "log export task: {logGroupName} {from}..{to} -> s3://{destination}/{destinationPrefix} %s"
                logger.info(msg.format(**task_desc), task_desc["status"]["code"])
                time.sleep(1)
        finally:
            try:
                clients.logs.cancel_export_task(taskId=task_desc["taskId"])
                # TODO: if cancel successful, clean up s3 prefix
            except Exception:
                pass  # cancellation may fail if the export already completed; ignore it
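
The for ... else construct above checks the S3 prefix for output from a previous export: if any object is found the loop breaks and the completed task is reused, otherwise the else branch starts a new export, and logger.debug records which path was taken. A generic sketch of this reuse-or-create pattern; candidates and create are hypothetical placeholders:

import logging

logger = logging.getLogger(__name__)

def reuse_or_create(candidates, create):
    # candidates: iterable of previously produced results; create: callable that makes a new one.
    for cached in candidates:
        logger.debug("Reusing cached result %s", cached)
        break
    else:
        # Runs only if the loop never executed `break`, i.e. no cached result was found.
        logger.debug("No cached result found, creating a new one")
        cached = create()
    return cached
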
Example from github.com/kislyuk/aegea: aegea/ecs.py
    try:
        task_defn = clients.ecs.describe_task_definition(taskDefinition=task_defn_name)["taskDefinition"]
        assert task_defn["status"] == "ACTIVE"
        assert "FARGATE" in task_defn["compatibilities"]
        desc_keys = ["family", "revision", "taskDefinitionArn", "status", "compatibilities", "placementConstraints",
                     "requiresAttributes"]
        task_desc = {key: task_defn.pop(key) for key in desc_keys}
        if expect_task_defn["cpu"].endswith(" vCPU"):
            expect_task_defn["cpu"] = str(int(expect_task_defn["cpu"][:-len(" vCPU")]) * 1024)
        if expect_task_defn["memory"].endswith(" GB"):
            expect_task_defn["memory"] = str(int(expect_task_defn["memory"][:-len(" GB")]) * 1024)
        assert task_defn == expect_task_defn
        logger.debug("Reusing task definition %s", task_desc["taskDefinitionArn"])
    except (ClientError, AssertionError):
        logger.debug("Registering new ECS task definition %s", task_defn_name)
        task_desc = clients.ecs.register_task_definition(family=task_defn_name, **expect_task_defn)["taskDefinition"]

    network_config = {
        'awsvpcConfiguration': {
            'subnets': [
                subnet.id for subnet in vpc.subnets.all()
            ],
            'securityGroups': [ensure_security_group(args.security_group, vpc).id],
            'assignPublicIp': 'ENABLED'
        }
    }
    container_overrides = [dict(name=args.task_name, command=command, environment=environment)]
    res = clients.ecs.run_task(cluster=args.cluster,
                               taskDefinition=task_desc["taskDefinitionArn"],
                               launchType="FARGATE",
                               networkConfiguration=network_config,
                               overrides=dict(containerOverrides=container_overrides))
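
Before the comparison with the existing task definition, the human-readable cpu and memory settings are normalized to the integer units the ECS API uses: 1 vCPU corresponds to 1024 CPU units, and memory given in GB is converted to MiB. For example:

# "4 vCPU" -> "4096" CPU units and "8 GB" -> "8192" MiB, matching the registered task definition.
cpu = "4 vCPU"
memory = "8 GB"
print(str(int(cpu[:-len(" vCPU")]) * 1024))    # 4096
print(str(int(memory[:-len(" GB")]) * 1024))   # 8192
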
Example from github.com/kislyuk/aegea: aegea/util/aws/__init__.py
def ensure_s3_bucket(name=None, policy=None, lifecycle=None):
    if name is None:
        name = "aegea-assets-{}".format(ARN.get_account_id())
    bucket = resources.s3.Bucket(name)
    try:
        clients.s3.head_bucket(Bucket=bucket.name)
    except ClientError as e:
        logger.debug(e)
        if ARN.get_region() == "us-east-1":
            bucket.create()
        else:
            bucket.create(CreateBucketConfiguration=dict(LocationConstraint=ARN.get_region()))
    bucket.wait_until_exists()
    if policy:
        bucket.Policy().put(Policy=str(policy))
    if lifecycle:
        bucket.LifecycleConfiguration().put(LifecycleConfiguration=dict(lifecycle))
    return bucket
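
In ensure_s3_bucket the caught ClientError is handed directly to logger.debug before the code falls back to creating the bucket, so the original error remains visible when debugging. A generic sketch (not aegea code) of logging a handled exception at DEBUG level, optionally with its traceback via exc_info:

import logging

logger = logging.getLogger(__name__)

try:
    raise FileNotFoundError("bucket does not exist")  # stand-in for a botocore ClientError
except Exception as e:
    logger.debug(e)  # the message is str(e)
    logger.debug("head_bucket failed: %s", e, exc_info=True)  # also records the traceback
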
Example from github.com/kislyuk/aegea: aegea/batch.py
def save_job_desc(job_desc):
    try:
        cprops = dict(image="busybox", vcpus=1, memory=4,
                      environment=[dict(name="job_desc", value=json.dumps(job_desc))])
        jd_name = "{}_job_desc_{}".format(__name__.replace(".", "_"), job_desc["jobId"])
        clients.batch.register_job_definition(jobDefinitionName=jd_name, type="container", containerProperties=cprops)
    except Exception as e:
        logger.debug("Error while saving job description: %s", e)