def _convert_and_save_entity(self, key, document, object_type, nested_object_types):
    # Only convert and track a document the first time its key is seen;
    # later loads reuse the entity already registered with the session.
    if key not in self._documents_by_id:
        entity, metadata, original_metadata, original_document = Utils.convert_to_entity(
            document, object_type, self.conventions, nested_object_types)
        self.save_entity(key, entity, original_metadata, metadata, original_document)
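# Hedged illustration (not pyravendb code): the "convert once, then track by id"
# pattern used by _convert_and_save_entity above, sketched with plain dictionaries.
# Names such as to_entity, documents_by_id and load_once are hypothetical stand-ins.
def to_entity(document):
    # split a raw document into its user-visible fields and its @metadata block
    metadata = document.get("@metadata", {})
    entity = {k: v for k, v in document.items() if k != "@metadata"}
    return entity, metadata

documents_by_id = {}

def load_once(key, document):
    # convert and cache a document only the first time its key is seen
    if key not in documents_by_id:
        entity, metadata = to_entity(document)
        documents_by_id[key] = {"entity": entity, "metadata": metadata}
    return documents_by_id[key]["entity"]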
    # Re-issue the query until the index is no longer stale or the timeout
    # (tracked by end_time) is exceeded; query_command and end_time are
    # prepared earlier in the full method this excerpt comes from.
    while True:
        response = self.session.requests_executor.execute(query_command)
        if response is None:
            return []
        if response["IsStale"] and self.wait_for_non_stale_results:
            if time.time() > end_time:
                raise ErrorResponseException("The index is still stale after the timeout was reached")
            continue
        break

    # Convert every raw result into an entity and register it with the session.
    results = []
    response_results = response.pop("Results")
    response_includes = response.pop("Includes", None)
    self.session.save_includes(response_includes)
    for result in response_results:
        entity, metadata, original_metadata, original_document = Utils.convert_to_entity(
            result, self.object_type, self.session.conventions, self.nested_object_types)
        if self.object_type != dict and not self.fields_to_fetch:
            self.session.save_entity(key=original_metadata.get("@id", None), entity=entity,
                                     original_metadata=original_metadata,
                                     metadata=metadata, original_document=original_document)
        results.append(entity)

    if self._with_statistics:
        return results, response
    return results
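# Hedged illustration (not pyravendb code): the retry-until-not-stale loop the query
# execution above depends on, with a plain callable standing in for the request
# executor. run_query, timeout_seconds and the use of TimeoutError are assumptions.
import time

def query_until_not_stale(run_query, timeout_seconds=15, wait_for_non_stale_results=True):
    # re-issue the query until the index reports it is no longer stale,
    # or give up once the timeout is exceeded
    end_time = time.time() + timeout_seconds
    while True:
        response = run_query()
        if response is None:
            return []
        if response["IsStale"] and wait_for_non_stale_results:
            if time.time() > end_time:
                raise TimeoutError("The index is still stale after the timeout was reached")
            time.sleep(0.1)  # brief pause before retrying
            continue
        return response["Results"]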
from pyravendb.store.stream import IncrementalJsonParser
import ijson

index_query = query.get_index_query()
if index_query.wait_for_non_stale_results:
    raise exceptions.NotSupportedException("Since stream() does not wait for indexing (by design), "
                                           "streaming a query with wait_for_non_stale_results is not supported.")
self.session.increment_requests_count()
command = QueryStreamCommand(index_query)
with self.session.requests_executor.execute(command) as response:
    # Parse the HTTP response incrementally and yield one document at a time,
    # so the whole result set is never materialized in memory.
    basic_parse = IncrementalJsonParser.basic_parse(response)
    parser = ijson.backend.common.parse(basic_parse)
    results = ijson.backend.common.items(parser, "Results")
    for result in next(results, None):
        document, metadata, _, _ = Utils.convert_to_entity(result, query.object_type, self.session.conventions,
                                                           query.nested_object_types)
        yield {"document": document, "metadata": metadata, "id": metadata.get("@id", None),
               "change-vector": metadata.get("@change-vector", None)}
def initialize(self):
    last_change_vector = None
    for item in self.raw_items:
        # Convert each raw subscription item and verify it carries the
        # metadata every subscription document is required to have.
        entity, metadata, _, _ = Utils.convert_to_entity(item['Data'], self._object_type,
                                                         self._store.conventions,
                                                         nested_object_types=self._nested_object_type)
        if not metadata:
            raise InvalidOperationException("Document must have a @metadata field")
        document_id = metadata.get('@id', None)
        if not document_id:
            raise InvalidOperationException("Document must have an @id field")
        last_change_vector = metadata.get('@change-vector', None)
        if not last_change_vector:
            raise InvalidOperationException("Document must have a @change-vector field")
        self._logger.info(
            "Got {0} (change vector: [{1}], size {2})".format(document_id, last_change_vector,
                                                              len(item["Data"])))
        self.items.append(
            _SubscriptionBatchItem(change_vector=last_change_vector, document_id=document_id,
                                   result=entity))  # assumption: the original call is truncated here; result=entity is a plausible completion
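# Hedged illustration (not pyravendb code): a minimal stand-in for the batch items
# built in initialize() above, plus the kind of loop a subscription consumer might
# run over them. SubscriptionBatchItem and process_batch are hypothetical names.
from collections import namedtuple

SubscriptionBatchItem = namedtuple("SubscriptionBatchItem",
                                   ["change_vector", "document_id", "result"])

def process_batch(items):
    # handle each document and remember the last change vector seen,
    # which is what a worker would acknowledge back to the server
    last_change_vector = None
    for item in items:
        print("processing", item.document_id, item.result)
        last_change_vector = item.change_vector
    return last_change_vector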