if isinstance(mongo_connector, ShardedClusterConnector):
    if mongo_connector.is_balancer_active():
        need_to_resume_balancer = True
        self._stop_balancer(backup, mongo_connector)
    # monitor balancer during kickoff window
    mongo_connector.start_balancer_activity_monitor()
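# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): roughly what the
# balancer check/stop above boils down to at the MongoDB level, using plain
# pymongo against a mongos. Assumes MongoDB >= 3.4, where the balancerStatus
# and balancerStop admin commands are available; stop_balancer_if_active is a
# hypothetical helper name.
from pymongo import MongoClient

def stop_balancer_if_active(mongos_uri):
    """Return True if the balancer was active and has now been stopped."""
    client = MongoClient(mongos_uri)
    status = client.admin.command("balancerStatus")
    if status.get("mode") != "off":
        # balancerStop disables balancing; an in-progress round is allowed to finish
        client.admin.command("balancerStop")
        return True
    return False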
# run fsync lock
if use_fsynclock:
    self._fsynclock(backup, mongo_connector)
else:
    msg = ("Snapshot Backup '%s' will be taken WITHOUT "
           "locking database and IO!" % backup.id)
    logger.warning(msg)
    update_backup(backup, event_type=EventType.WARNING,
                  event_name="NOT_LOCKED",
                  message=msg)
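# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the fsync lock/unlock
# that _fsynclock above wraps, expressed with plain pymongo. Assumes
# MongoDB >= 3.2, where the fsyncUnlock command is available; the function
# names are hypothetical.
def fsync_lock(client):
    """Flush pending writes and block further writes (client is a pymongo MongoClient)."""
    client.admin.command("fsync", lock=True)

def fsync_unlock(client):
    """Release a previously acquired fsync lock."""
    client.admin.command("fsyncUnlock")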
# suspend io
if use_suspend_io:
    self._suspend_io(backup, mongo_connector, cbs)
# create the snapshot
self._create_snapshot(backup, cbs)
# wait until snapshot is pending or completed or error
self._wait_for_pending_status(backup, cbs)
# resume io/unlock
if use_suspend_io:
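# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): one way to pair the
# lock/suspend steps above with a guaranteed resume/unlock using try/finally.
# The lock_fn/suspend_fn/resume_fn/unlock_fn callables are hypothetical
# placeholders for helpers such as _fsynclock and _suspend_io shown above.
def run_with_quiesced_source(create_snapshot_fn, lock_fn=None, unlock_fn=None,
                             suspend_fn=None, resume_fn=None):
    """Quiesce the source, take the snapshot, and always release in reverse order."""
    locked = suspended = False
    try:
        if lock_fn:
            lock_fn()
            locked = True
        if suspend_fn:
            suspend_fn()
            suspended = True
        return create_snapshot_fn()
    finally:
        # release in reverse order of acquisition, even if the snapshot failed
        if suspended and resume_fn:
            resume_fn()
        if locked and unlock_fn:
            unlock_fn()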
# update backup name and desc
update_props = list()
name = self.get_backup_name(backup)
if not backup.name or (update and name != backup.name):
    backup.name = name
    update_props.append("name")
desc = self.get_backup_description(backup)
if not backup.description or (update and desc != backup.description):
    backup.description = desc
    update_props.append("description")
if update_props:
    update_backup(backup, properties=update_props)
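# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the "only persist
# properties that actually changed" pattern above, generalized. The helper
# name and its arguments are hypothetical.
def changed_properties(obj, updates):
    """Apply updates to obj and return the names of properties that changed."""
    changed = []
    for name, value in updates.items():
        if getattr(obj, name, None) != value:
            setattr(obj, name, value)
            changed.append(name)
    return changed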
# mongo version is >= 2.6.0
if (mongo_version >= MongoNormalizedVersion("2.6.0") and
        database_name is not None and
        self.dump_users is not False):
    dump_options.append("--dumpDbUsersAndRoles")
# apply overrides
self._apply_dump_options_overrides(dump_options)
log_file_name = _log_file_name(backup)
# execute dump command
dump_info = self.backup_assistant.dump_backup(backup, uri, destination, log_file_name, options=dump_options)
if dump_info and "dumpCollectionCounts" in dump_info:
    backup.data_stats["dumpCollectionCounts"] = dump_info["dumpCollectionCounts"]
update_backup(backup, properties="dataStats",
              event_name=EVENT_END_EXTRACT,
              message="Dump completed")
snapshot_ref = cbs.create_snapshot(name_template, desc_template)
# set sourceWasLocked field
snapshot_ref.source_was_locked = backup.is_event_logged(BackupEventNames.FSYNCLOCK_END)
backup.target_reference = snapshot_ref
msg = ("Composite snapshot created successfully "
       "(composed of %s snapshots)" % count)
logger.info("%s, backup id '%s' " % (msg, backup.id))
update_backup(backup, properties="targetReference",
              event_name="END_CREATE_SNAPSHOT",
              message=msg)
###########################################################################
def _compute_source_stats(self, backup, mongo_connector):
    """
    Computes the backup source stats and saves them on the backup.
    :param backup: the backup whose source stats are being computed
    :param mongo_connector: connector to the backup source
    :return: None
    """
    logger.info("Computing source stats for backup '%s', connector '%s'..." %
                (backup.id, mongo_connector))
    dbname = backup.source.database_name
    try:
        if (self.backup_mode == BackupMode.ONLINE and
                mongo_connector.is_online()):
            backup.source_stats = mongo_connector.get_stats(
                only_for_db=dbname)
            # save source stats
            update_backup(backup, properties="sourceStats",
                          event_name="COMPUTED_SOURCE_STATS",
                          message="Computed source stats")
            logger.info("Finished computing source stats for backup '%s', "
                        "connector '%s'." % (backup.id, mongo_connector))
    except Exception as e:
        if is_connection_exception(e) and self.allow_offline_backups:
            # switch to offline mode
            logger.info("Caught a connection error while trying to compute"
                        " source stats for backup '%s'. %s. Switching to "
                        "OFFLINE mode..." % (backup.id, e))
            self._set_backup_mode(backup, BackupMode.OFFLINE)
        else:
            raise
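# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the same fallback
# pattern as _compute_source_stats above, written against plain pymongo. The
# exception classes and dbStats command come from pymongo/MongoDB; the function
# name and return convention are hypothetical.
import logging

from pymongo import MongoClient
from pymongo.errors import ConnectionFailure, ServerSelectionTimeoutError

def compute_stats_or_go_offline(uri, dbname, allow_offline=True):
    """Return (mode, stats); switch to 'OFFLINE' if the server is unreachable."""
    try:
        client = MongoClient(uri, serverSelectionTimeoutMS=5000)
        stats = client[dbname].command("dbStats")
        return "ONLINE", stats
    except (ConnectionFailure, ServerSelectionTimeoutError) as e:
        if allow_offline:
            logging.getLogger(__name__).info(
                "Connection error (%s); switching to OFFLINE mode", e)
            return "OFFLINE", None
        raise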
raise TargetUploadError("Upload target mismatch! requested to upload to %s targets and got %s target"
                        " references back" % (len(all_targets), len(target_references)))

# set the target reference
target_reference = target_references[0]
# keep old target reference if it exists to delete it because it would
# be the failed file reference
failed_reference = backup.target_reference
backup.target_reference = target_reference
# set the secondary target references
if backup.secondary_targets:
    backup.secondary_target_references = target_references[1:]
update_backup(backup, properties=["targetReference",
                                  "secondaryTargetReferences"],
              event_name=EVENT_END_UPLOAD,
              message="Upload completed!")
# remove failed reference if exists
if failed_reference:
    try:
        backup.target.delete_file(failed_reference)
    except Exception as ex:
        logger.error("Exception while deleting failed backup file: %s"
                     % ex)
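# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the swap-then-delete
# pattern above, generalized. 'store' is a hypothetical object exposing
# delete_file(); 'record' is any object with a target_reference attribute.
def replace_target_reference(record, new_reference, store, logger):
    """Point record at new_reference, then best-effort delete the old one."""
    old_reference = record.target_reference
    record.target_reference = new_reference
    if old_reference:
        try:
            store.delete_file(old_reference)
        except Exception as ex:
            # deleting the stale/failed upload is best-effort only
            logger.error("Failed to delete old reference: %s", ex)
    return record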
target_ref = backup.target_reference
# encrypted snapshots are not sharable. Log warning...
if is_snapshot_volume_encrypted(target_ref):
    msg = "Will not share snapshot backup '%s' because the volume is encrypted" % backup.id
    logger.warning(msg)
    update_backup(backup, event_type=EventType.WARNING,
                  event_name="NO_ENCRYPTED_EBS_SHARING",
                  message=msg)
else:
    target_ref = cbs.share_snapshot(target_ref, user_ids=user_ids, groups=groups)
    backup.target_reference = target_ref
    msg = "Snapshot backup '%s' shared successfully!" % backup.id
    logger.info(msg)
    update_backup(backup, properties="targetReference",
                  event_name="SHARE_SNAPSHOT",
                  message=msg)
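# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the same guard using
# boto3 directly rather than mbs's cloud_block_storage layer. Mirrors the
# original behavior of skipping encrypted snapshot volumes; the function name
# and parameters are hypothetical.
import boto3

def share_ebs_snapshot_if_possible(snapshot_id, user_ids=None, groups=None,
                                   region_name=None):
    """Share an unencrypted EBS snapshot; return True if it was shared."""
    ec2 = boto3.client("ec2", region_name=region_name)
    snapshot = ec2.describe_snapshots(SnapshotIds=[snapshot_id])["Snapshots"][0]
    if snapshot["Encrypted"]:
        # encrypted snapshot volumes are not shared, matching the guard above
        return False
    params = {"SnapshotId": snapshot_id,
              "Attribute": "createVolumePermission",
              "OperationType": "add"}
    if user_ids:
        params["UserIds"] = user_ids
    if groups:
        params["GroupNames"] = groups
    ec2.modify_snapshot_attribute(**params)
    return True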
raise ConfigurationError("Invalid suspend io attempt. '%s' has to"
" be a MongoServer" % mongo_connector)
if ensure_local and not self.backup_assistant.is_connector_local_to_assistant(mongo_connector, backup):
err = ("Cannot suspend io for '%s' because is not local to"
" this box" % mongo_connector)
raise ConfigurationError(err)
try:
msg = "Running suspend IO for '%s'..." % mongo_connector
logger.info(msg)
update_backup(backup, event_name=BackupEventNames.SUSPEND_IO, message=msg)
self.backup_assistant.suspend_io(backup, mongo_connector, cloud_block_storage)
update_backup(backup, event_name=BackupEventNames.SUSPEND_IO_END, message="Suspend IO done!")
except Exception, ex:
msg = ("Suspend IO Error for '%s'" % mongo_connector)
logger.exception(msg)
if isinstance(ex, MBSError):
raise
else:
raise SuspendIOError(msg, cause=ex)
finally:
self._start_max_io_suspend_monitor(backup, mongo_connector,
cloud_block_storage)
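# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the wrap-unknown-
# errors pattern used above, with placeholder exception types standing in for
# MBSError and SuspendIOError.
class DomainError(Exception):
    """Stand-in for a known, already-classified error type (e.g. MBSError)."""

class SuspendError(DomainError):
    """Stand-in for SuspendIOError; keeps the original exception as 'cause'."""
    def __init__(self, message, cause=None):
        super(SuspendError, self).__init__(message)
        self.cause = cause

def call_and_classify(operation, description):
    """Run operation(); re-raise known errors, wrap anything else."""
    try:
        return operation()
    except DomainError:
        # already a classified error: propagate unchanged
        raise
    except Exception as ex:
        # unknown failure: wrap so callers see a single, known error type
        raise SuspendError("%s failed" % description, cause=ex)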
update_backup(backup,
              event_name="ERROR_HANDLING_END_TAR",
              message="Finished tarring failed dump")
# upload
logger.info("Uploading tar for failed backup '%s' ..." % backup.id)
update_backup(backup,
              event_name="ERROR_HANDLING_START_UPLOAD",
              message="Uploading failed dump tar")
# upload failed tar file and allow overwriting existing
target_reference = self.backup_assistant.upload_backup(backup, failed_tar_filename, backup.target,
                                                       destination_path=failed_dest)
backup.target_reference = target_reference
update_backup(backup, properties="targetReference",
              event_name="ERROR_HANDLING_END_UPLOAD",
              message="Finished uploading failed tar")
def _wait_for_snapshot_status(self, backup, cbs, wait_status,
                              sleep_time=5):
    msg = ("Waiting for backup '%s' snapshot status to be in %s" %
           (backup.id, wait_status))
    logger.info(msg)
    update_backup(backup, message=msg)
    # wait until snapshot is completed and keep target ref up to date
    snapshot_ref = backup.target_reference
    wait_status = listify(wait_status)
    while snapshot_ref.status not in wait_status:
        logger.info("Checking updates for backup '%s' snapshot" %
                    backup.id)
        new_snapshot_ref = cbs.check_snapshot_updates(snapshot_ref)
        if new_snapshot_ref:
            logger.info("Detected updates for backup '%s' snapshot" %
                        backup.id)
            diff = snapshot_ref.diff(new_snapshot_ref)
            logger.info("Diff: \n%s" % document_pretty_string(diff))
            snapshot_ref = new_snapshot_ref
            backup.target_reference = snapshot_ref
            update_backup(backup, properties="targetReference")
        # throttle polling between checks
        time.sleep(sleep_time)