def update_cluster_durability(cmd, client, resource_group_name, cluster_name, node_type, durability_level):
    cli_ctx = cmd.cli_ctx
    cluster = client.get(resource_group_name, cluster_name)
    node_types = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
    if not node_types:
        raise CLIError("Failed to find the node type in the cluster")

    compute_client = compute_client_factory(cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(
        resource_group_name, node_type)
    fabric_exts = [ext for ext in vmss.virtual_machine_profile.extension_profile.extensions
                   if ext.type.lower() == SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME or
                   ext.type.lower() == SERVICE_FABRIC_LINUX_NODE_EXT_NAME]
    if not fabric_exts:
        raise CLIError("Failed to find the Service Fabric extension")
    fabric_ext = fabric_exts[0]
    if fabric_ext.settings['durabilityLevel'] == durability_level:
        return cluster

    # Update the durability level on the VM scale set extension first,
    # then patch the matching node type on the cluster resource.
    fabric_ext.settings['durabilityLevel'] = durability_level
    fabric_ext.settings['enableParallelJobs'] = True
    vmss_poll = compute_client.virtual_machine_scale_sets.create_or_update(
        resource_group_name, vmss.name, vmss)

    node_type = node_types[0]
    node_type.durability_level = durability_level
    patch_request = ClusterUpdateParameters(node_types=node_types)
    update_cluster_poll = client.update(
        resource_group_name, cluster_name, patch_request)
def wait(ctx, poller):
    return LongRunningOperation(ctx)(poller)
def set_policy(client, resource_group_name, vault_name, policy, policy_name):
    policy_object = _get_policy_from_json(client, policy)
    retention_range_in_days = policy_object.properties.instant_rp_retention_range_in_days
    schedule_run_frequency = policy_object.properties.schedule_policy.schedule_run_frequency

    # Validating range of days input
    if schedule_run_frequency == 'Weekly' and retention_range_in_days != 5:
        raise CLIError(
            """
            Retention policy range must be equal to 5.
            """)
    if schedule_run_frequency == 'Daily' and (retention_range_in_days > 5 or retention_range_in_days < 1):
        raise CLIError(
            """
            Retention policy range must be between 1 and 5.
            """)

    error_message = "For SnapshotRetentionRangeInDays, the minimum value is 1 and " \
                    "the maximum is 5. For weekly backup policies, the only allowed value is 5. " \
                    "Please set the value accordingly."
    if policy_object.properties.schedule_policy.schedule_run_frequency == "Weekly":
        if policy_object.properties.instant_rp_retention_range_in_days is not None:
            if policy_object.properties.instant_rp_retention_range_in_days != 5:
                logger.error(error_message)
        else:
            policy_object.properties.instant_rp_retention_range_in_days = 5
    else:
        if policy_object.properties.instant_rp_retention_range_in_days is not None:
logger.debug('Validating the extension %s', ext_file)
if ext_sha256:
    valid_checksum, computed_checksum = is_valid_sha256sum(ext_file, ext_sha256)
    if valid_checksum:
        logger.debug("Checksum of %s is OK", ext_file)
    else:
        logger.debug("Invalid checksum for %s. Expected '%s', computed '%s'.",
                     ext_file, ext_sha256, computed_checksum)
        raise CLIError("The checksum of the extension does not match the expected value. "
                       "Use --debug for more information.")
try:
    cmd.cli_ctx.get_progress_controller().add(message='Validating')
    _validate_whl_extension(ext_file)
except AssertionError:
    logger.debug(traceback.format_exc())
    raise CLIError('The extension is invalid. Use --debug for more information.')
except CLIError as e:
    raise e
logger.debug('Validation successful on %s', ext_file)
# Check for distro consistency
check_distro_consistency()
cmd.cli_ctx.get_progress_controller().add(message='Installing')
# Install with pip
extension_path = get_extension_path(extension_name)
pip_args = ['install', '--target', extension_path, ext_file]
if pip_proxy:
    pip_args = pip_args + ['--proxy', pip_proxy]
if pip_extra_index_urls:
    for extra_index_url in pip_extra_index_urls:
        pip_args = pip_args + ['--extra-index-url', extra_index_url]
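# The checksum gate above calls an is_valid_sha256sum helper that is not shown in this
# fragment. Below is a minimal sketch of what such a helper might look like, using only
# hashlib; the (valid, computed) return shape is taken from the call site above, while the
# implementation details are an assumption for illustration.
import hashlib

def is_valid_sha256sum(file_path, expected_sha256):
    # Hash the file in chunks and compare against the expected hex digest.
    sha256 = hashlib.sha256()
    with open(file_path, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            sha256.update(chunk)
    computed = sha256.hexdigest()
    return computed.lower() == expected_sha256.lower(), computed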
from subprocess import CalledProcessError, check_output

from knack.util import CLIError


def _is_inside_git_directory():
    """
    Determines if the user is inside the .git folder of a git repo
    """
    try:
        is_inside_git_dir = check_output(['git', 'rev-parse', '--is-inside-git-dir'])
    except OSError:
        raise CLIError('Git is not currently installed.')
    except CalledProcessError:
        raise CLIError('Current working directory is not a git repository.')
    git_result = is_inside_git_dir.decode('utf-8').strip()
    if git_result == 'false':
        return False
    if git_result == 'true':
        return True
    raise CLIError('Unexpected value from git operation.')
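# A quick way to exercise the helper from a scratch script (assumes git is on PATH and the
# function above is defined in the same module; the printed wording is illustrative only).
if __name__ == '__main__':
    try:
        print('Inside .git directory:', _is_inside_git_directory())
    except CLIError as err:  # raised when git is missing or the cwd is not a repository
        print('Cannot determine git state:', err)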
def validate_nodes_count(namespace):
    """Validate that min_count and max_count are within the range [1, 100]."""
    if namespace.min_count is not None:
        if namespace.min_count < 1 or namespace.min_count > 100:
            raise CLIError('--min-count must be in the range [1,100]')
    if namespace.max_count is not None:
        if namespace.max_count < 1 or namespace.max_count > 100:
            raise CLIError('--max-count must be in the range [1,100]')
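# Illustrative use of the validator with a stand-in for the parsed CLI arguments; in the real
# command the namespace comes from the argument parser, argparse.Namespace is just a convenient
# substitute here and the values are made up.
from argparse import Namespace

ns = Namespace(min_count=0, max_count=3)
try:
    validate_nodes_count(ns)
except CLIError as err:
    print(err)  # --min-count must be in the range [1,100]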
def _privatedns_remove_record(client, record, record_type, relative_record_set_name, resource_group_name,
                              private_zone_name, keep_empty_record_set, is_list=True):
    record_set = client.get(
        resource_group_name, private_zone_name, record_type, relative_record_set_name)
    record_property = _privatedns_type_to_property_name(record_type)

    if is_list:
        record_list = getattr(record_set, record_property)
        if record_list is not None:
            keep_list = [r for r in record_list
                         if not dict_matches_filter(r.__dict__, record.__dict__)]
            if len(keep_list) == len(record_list):
                raise CLIError('Record {} not found.'.format(str(record)))
            setattr(record_set, record_property, keep_list)
    else:
        setattr(record_set, record_property, None)

    if is_list:
        records_remaining = len(getattr(record_set, record_property))
    else:
        records_remaining = 1 if getattr(record_set, record_property) is not None else 0

    if not records_remaining and not keep_empty_record_set:
        logger.info('Removing empty %s record set: %s', record_type, relative_record_set_name)
        return client.delete(resource_group_name, private_zone_name, record_type, relative_record_set_name)

    return client.create_or_update(resource_group_name, private_zone_name, record_type,
                                   relative_record_set_name, record_set)
        raise CLIError(
            "--thumbprint can only be specified alone or with --is-admin")
    if certificate_common_name or certificate_issuer_thumbprint:
        if certificate_issuer_thumbprint is None or certificate_common_name is None:
            raise CLIError(
                "Both '--certificate-common-name' and '--certificate-issuer-thumbprint' should not be None")
        if thumbprint or admin_client_thumbprints or readonly_client_thumbprints or client_certificate_common_names or is_admin:
            raise CLIError(
                "Only '--certificate-common-name' and '--certificate-issuer-thumbprint' can be specified together")
    if admin_client_thumbprints or readonly_client_thumbprints:
        if thumbprint or certificate_common_name or certificate_issuer_thumbprint or client_certificate_common_names or is_admin:
            raise CLIError(
                "Only '--admin-client-thumbprints' and '--readonly-client-thumbprints' can be specified together")
    if client_certificate_common_names:
        if is_admin or thumbprint or certificate_common_name or certificate_issuer_thumbprint or admin_client_thumbprints or readonly_client_thumbprints:  # pylint: disable=too-many-boolean-expressions
            raise CLIError(
                "'--client-certificate-commonNames' can only be specified alone")
    cluster = client.get(resource_group_name, cluster_name)
def _add_thumbprint(cluster, is_admin, thumbprint):
    # Drop any existing entry for the same thumbprint before re-adding it.
    remove = []
    for t in cluster.client_certificate_thumbprints:
        if t.certificate_thumbprint.lower() == thumbprint.lower():
            remove.append(t)
    for t in remove:
        cluster.client_certificate_thumbprints.remove(t)
    cluster.client_certificate_thumbprints.append(
        ClientCertificateThumbprint(is_admin=is_admin, certificate_thumbprint=thumbprint))


def _add_common_name(cluster, is_admin, certificate_common_name, certificate_issuer_thumbprint):
    for t in cluster.client_certificate_common_names:
def managed_instance_encryption_protector_update(
        client,
        resource_group_name,
        managed_instance_name,
        server_key_type,
        kid=None):
    '''
    Updates a server encryption protector.
    '''
    if server_key_type == ServerKeyType.service_managed.value:
        key_name = 'ServiceManaged'
    else:
        if kid is None:
            raise CLIError('A uri must be provided if the server_key_type is AzureKeyVault.')
        key_name = _get_server_key_name_from_uri(kid)

    return client.create_or_update(
        resource_group_name=resource_group_name,
        managed_instance_name=managed_instance_name,
        server_key_type=server_key_type,
        server_key_name=key_name
    )
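# The AzureKeyVault branch above delegates to a _get_server_key_name_from_uri helper that is
# not shown here. The sketch below illustrates one plausible implementation, assuming the key
# name is derived from the vault, key-name and key-version segments of a Key Vault key URI;
# both the naming scheme and the helper body are assumptions for illustration, not the SDK's
# documented behavior.
from urllib.parse import urlparse

def _example_server_key_name_from_uri(kid):
    # Expect a URI such as https://<vault>.vault.azure.net/keys/<name>/<version>
    parsed = urlparse(kid)
    vault = parsed.hostname.split('.')[0]
    parts = parsed.path.strip('/').split('/')
    if len(parts) < 3 or parts[0] != 'keys':
        raise ValueError('Expected https://<vault>.vault.azure.net/keys/<name>/<version>')
    return '{}_{}_{}'.format(vault, parts[1], parts[2])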
    :param str routing_registry_name: Internet Routing Registry / Regional Internet Registry
    """
    ExpressRouteCircuitPeering, ExpressRouteCircuitPeeringConfig, ExpressRouteCircuitPeeringType = get_sdk(
        ResourceType.MGMT_NETWORK,
        'ExpressRouteCircuitPeering',
        'ExpressRouteCircuitPeeringConfig',
        'ExpressRouteCircuitPeeringType',
        mod='models')

    # TODO: Remove workaround when issue #1574 is fixed in the service
    # region Issue #1574 workaround
    circuit = _network_client_factory().express_route_circuits.get(
        resource_group_name, circuit_name)
    if peering_type == ExpressRouteCircuitPeeringType.microsoft_peering.value and \
            circuit.sku.tier == ExpressRouteCircuitSkuTier.standard.value:
        raise CLIError("MicrosoftPeering cannot be created on a 'Standard' SKU circuit")
    for peering in circuit.peerings:
        if peering.vlan_id == vlan_id:
            raise CLIError(
                "VLAN ID '{}' already in use by peering '{}'".format(vlan_id, peering.name))
    # endregion

    peering_config = ExpressRouteCircuitPeeringConfig(
        advertised_public_prefixes=advertised_public_prefixes,
        customer_asn=customer_asn,
        routing_registry_name=routing_registry_name) \
        if peering_type == ExpressRouteCircuitPeeringType.microsoft_peering.value else None
    peering = ExpressRouteCircuitPeering(
        peering_type=peering_type, peer_asn=peer_asn, vlan_id=vlan_id,
        primary_peer_address_prefix=primary_peer_address_prefix,
        secondary_peer_address_prefix=secondary_peer_address_prefix,
        shared_key=shared_key,
def validate_update_service_params(stateless, stateful, target_rep_set_size, min_rep_set_size,
                                   rep_restart_wait, quorum_loss_wait, stand_by_replica_keep,
                                   instance_count):
    if sum([stateless, stateful]) != 1:
        raise CLIError("Must specify exactly one of stateful or stateless")

    if stateless:
        if target_rep_set_size is not None:
            raise CLIError("Cannot specify target replica set size for stateless service")
        if min_rep_set_size is not None:
            raise CLIError("Cannot specify minimum replica set size for stateless service")
        if rep_restart_wait is not None:
            raise CLIError("Cannot specify replica restart wait duration for stateless service")
        if quorum_loss_wait is not None:
            raise CLIError("Cannot specify quorum loss wait duration for stateless service")
        if stand_by_replica_keep is not None:
            raise CLIError("Cannot specify standby replica keep duration for stateless service")

    if stateful:
        if instance_count is not None:
            raise CLIError("Cannot specify an instance count for a stateful service")