# NOTE: fragment of WorksheetModel.update_worksheet_items; the signature and
# the delete `clause` below are reconstructed from the usage that follows.
def update_worksheet_items(self, worksheet_uuid, last_item_id, length, new_items):
    # Select the existing rows being replaced (reconstructed).
    clause = and_(
        cl_worksheet_item.c.worksheet_uuid == worksheet_uuid,
        cl_worksheet_item.c.id <= last_item_id,
    )
    # Assign consecutive sort keys ending at last_item_id; this can produce
    # negative sort keys, but that's fine.
    new_item_values = [
        {
            'worksheet_uuid': worksheet_uuid,
            'bundle_uuid': bundle_uuid,
            'subworksheet_uuid': subworksheet_uuid,
            'value': self.encode_str(value),
            'type': item_type,
            'sort_key': (last_item_id + i - len(new_items)),
        }
        for (i, (bundle_uuid, subworksheet_uuid, value, item_type)) in enumerate(new_items)
    ]
    with self.engine.begin() as connection:
        result = connection.execute(cl_worksheet_item.delete().where(clause))
        message = 'Found extra items for worksheet %s' % (worksheet_uuid,)
        precondition(result.rowcount <= length, message)
        if result.rowcount < length:
            raise UsageError('Worksheet %s was updated concurrently!' % (worksheet_uuid,))
        self.do_multirow_insert(connection, cl_worksheet_item, new_item_values)
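
# A minimal standalone sketch of the sort_key arithmetic above (hypothetical
# values): new items get consecutive keys ending at last_item_id, so keys go
# negative whenever len(new_items) exceeds last_item_id.
def _demo_sort_keys(last_item_id, new_items):
    return [last_item_id + i - len(new_items) for i in range(len(new_items))]

assert _demo_sort_keys(2, ['a', 'b', 'c', 'd']) == [-2, -1, 0, 1]
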
def new_worksheet(self, worksheet):
    """
    Save the given (empty) worksheet to the database. On success, set its id.
    """
    message = 'new_worksheet called with non-empty worksheet: %s' % (worksheet,)
    precondition(not worksheet.items, message)
    worksheet.validate()
    worksheet_value = worksheet.to_dict()
    # Strip fields that are not stored directly on the worksheet row.
    worksheet_value.pop('tags')
    worksheet_value.pop('items')
    worksheet_value.pop('last_item_id')
    with self.engine.begin() as connection:
        result = connection.execute(cl_worksheet.insert().values(worksheet_value))
        worksheet.id = result.lastrowid
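
# A minimal sketch of the field-stripping above, with a hypothetical
# worksheet dict: the non-row fields are removed before the insert.
_ws = {'uuid': '0x1', 'name': 'demo', 'tags': [], 'items': [], 'last_item_id': -1}
for _field in ('tags', 'items', 'last_item_id'):
    _ws.pop(_field)
assert _ws == {'uuid': '0x1', 'name': 'demo'}
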
def update_in_memory(self, row, strict=False):
    metadata = row.pop('metadata', None)
    dependencies = row.pop('dependencies', None)
    if strict:
        precondition(metadata is not None, 'No metadata: %s' % (row,))
        precondition(dependencies is not None, 'No dependencies: %s' % (row,))
        # In strict mode, a fresh row may lack a uuid; generate one.
        if 'uuid' not in row:
            row['uuid'] = spec_util.generate_uuid()
    super(Bundle, self).update_in_memory(row)
    if metadata is not None:
        self.metadata = Metadata(self.METADATA_SPECS, metadata)
    if dependencies is not None:
        self.dependencies = [Dependency(dep) for dep in dependencies]
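
# A minimal sketch of the row-splitting step above, using plain dicts:
# metadata and dependencies are peeled off before the remaining columns are
# handed to the base class.
_row = {'uuid': '0xabc', 'state': 'ready', 'metadata': {'name': 'b'}, 'dependencies': []}
_metadata = _row.pop('metadata', None)
_dependencies = _row.pop('dependencies', None)
assert set(_row) == {'uuid', 'state'} and _metadata == {'name': 'b'}
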
def fetch_one(self, resource_type, resource_id=None, params=None):
    """
    Same as JsonApiClient.fetch, but always returns exactly one resource
    dictionary, raising a PreconditionViolation if the results contain
    anything other than exactly one resource.
    """
    results = self.fetch(resource_type, resource_id=resource_id, params=params)
    precondition(
        not isinstance(results, list) or len(results) == 1,
        "Got %d %s when expecting exactly 1." % (len(results), resource_type),
    )
    # fetch() may return either a single dict or a list; unwrap the list case.
    return results[0] if isinstance(results, list) else results
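
# A minimal standalone sketch of the fetch_one contract, with the fetch
# result stubbed in directly:
def _fetch_one(results, resource_type='bundles'):
    assert not isinstance(results, list) or len(results) == 1, \
        'Got %d %s when expecting exactly 1.' % (len(results), resource_type)
    return results[0] if isinstance(results, list) else results

assert _fetch_one({'id': '0x1'}) == {'id': '0x1'}    # single dict passes through
assert _fetch_one([{'id': '0x1'}]) == {'id': '0x1'}  # one-element list is unwrapped
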
def check_uuid(uuid_str):
    """
    Raise a PreconditionViolation if the uuid does not conform to its regex.
    """
    message = 'uuids must match %s, was %s' % (UUID_REGEX.pattern, uuid_str)
    precondition(UUID_REGEX.match(uuid_str), message)
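
# Usage sketch, assuming a CodaLab-style pattern ('0x' plus 32 hex digits);
# the real UUID_REGEX lives in spec_util and may differ:
import re

_UUID_REGEX = re.compile(r'^0x[0-9a-f]{32}\Z')
assert _UUID_REGEX.match('0x' + 'a' * 32)   # well-formed uuid
assert not _UUID_REGEX.match('not-a-uuid')  # would trip the precondition
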
def _fetch_bundle(uuid):
    """
    Fetch a bundle by UUID.

    Query parameters:
    - `include_display_metadata`: `1` to include additional metadata helpful
      for displaying the bundle info, `0` to omit it. Default is `0`.
    - `include`: comma-separated list of related resources to include,
      such as "owner".
    """
    document = build_bundles_document([uuid])
    precondition(len(document['data']) == 1, "data should have exactly one element")
    document['data'] = document['data'][0]  # Flatten the data list
    return document
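
# A minimal sketch of the flattening step above: a JSON API document carries
# 'data' as a list, and a single-bundle endpoint collapses it to one object.
_document = {'data': [{'type': 'bundles', 'id': '0x1'}]}
_document['data'] = _document['data'][0]
assert _document['data']['id'] == '0x1'
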
def update_in_memory(self, row, strict=False):
    '''
    Initialize the attributes on this object from the data in the row.
    The attributes of the row are inferred from the table columns.
    If strict is True, checks that all columns are included in the row.
    '''
    if strict:
        for column in self.COLUMNS:
            precondition(column in row, 'Row %s missing column: %s' % (row, column))
    for (key, value) in list(row.items()):
        message = 'Row %s has extra column: %s' % (row, key)
        precondition(key in self.COLUMNS or key == 'id', message)
        setattr(self, key, value)
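
# A minimal sketch of the column checks above, with a toy COLUMNS tuple:
# every declared column must be present, and no unknown keys besides 'id'
# may appear.
_COLUMNS = ('uuid', 'name')
_row = {'id': 7, 'uuid': '0x1', 'name': 'demo'}
assert all(column in _row for column in _COLUMNS)
assert all(key in _COLUMNS or key == 'id' for key in _row)
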
def get_relative_path(root, path):
    """
    Return the relative path from root to path, which should be nested under root.
    """
    precondition(path.startswith(root), '%s is not under %s' % (path, root))
    return path[len(root):]
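
# Usage sketch; note the containment check is purely textual, so a root
# without a trailing separator also matches sibling paths like '/data2':
def _get_relative_path(root, path):
    assert path.startswith(root), '%s is not under %s' % (path, root)
    return path[len(root):]

assert _get_relative_path('/data', '/data/a/b.txt') == '/a/b.txt'
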
def _do_bundle_action(self, bundle_uuid, worker_message, action_string):
    """
    Sends the message to the worker to do the bundle action, and adds the
    action string to the bundle metadata.
    """
    bundle = self.model.get_bundle(bundle_uuid)
    if bundle.state != State.RUNNING:
        raise UsageError('Cannot execute this action on a bundle that is not running.')
    worker = self.worker_model.get_bundle_worker(bundle_uuid)
    precondition(
        self.worker_model.send_json_message(worker['socket_id'], worker_message, 60),
        'Unable to reach worker.',
    )
    # Append the action string to the bundle's metadata.
    new_actions = getattr(bundle.metadata, 'actions', []) + [action_string]
    db_update = {'metadata': {'actions': new_actions}}
    self.model.update_bundle(bundle, db_update)
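
# A minimal sketch of the metadata update above: getattr() falls back to an
# empty list the first time, so action strings accumulate across calls.
class _Meta:
    pass

_meta = _Meta()
_meta.actions = getattr(_meta, 'actions', []) + ['kill']
assert getattr(_meta, 'actions', []) + ['write foo'] == ['kill', 'write foo']
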
import hashlib
import os

def hash_file_contents(path):
    """
    Return the hash of the file's contents, read in blocks of size BLOCK_SIZE.
    """
    message = 'hash_file_contents called with relative path: %s' % (path,)
    precondition(os.path.isabs(path), message)
    if os.path.islink(path):
        # Hash the link target rather than following it; os.readlink returns
        # str, so encode it before feeding the hash.
        contents_hash = hashlib.sha1(LINK_PREFIX)
        contents_hash.update(os.readlink(path).encode())
    else:
        contents_hash = hashlib.sha1(FILE_PREFIX)
        with open(path, 'rb') as file_handle:
            while True:
                data = file_handle.read(BLOCK_SIZE)
                if not data:
                    break
                contents_hash.update(data)
    return contents_hash.hexdigest()
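
# Usage sketch with a throwaway file; BLOCK_SIZE and the FILE_PREFIX /
# LINK_PREFIX salts are module constants assumed here to be bytes:
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'hello')
digest = hash_file_contents(f.name)  # an absolute path is required
assert digest == hashlib.sha1(FILE_PREFIX + b'hello').hexdigest()
os.unlink(f.name)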