Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def _test_resumetoken_uniterated_nonempty_batch(self, resume_option):
    """Assert the change stream's resume token equals the resume option
    used to open it while the non-empty first batch is still uniterated.

    :Parameters:
      - `resume_option`: name of the resume kwarg to pass to
        ``change_stream`` (e.g. "resumeAfter" or "startAfter").
    """
    token = self.get_resume_token()
    # Generate change events so the stream's firstBatch is non-empty.
    coll = self.watched_collection(write_concern=WriteConcern("majority"))
    coll.insert_many([{'a': 1}, {'b': 2}, {'c': 3}])
    # Before consuming any event, the reported resume token must still
    # be exactly the token we resumed from.
    with self.change_stream(**{resume_option: token}) as stream:
        self.assertTrue(stream._cursor._has_next())
        self.assertEqual(stream.resume_token, token)
def test_ship_of_theseus(self):
    # Replace replica-set members one at a time while a client stays
    # connected; writes use w=<number of data-bearing members> so each
    # insert is acknowledged everywhere.
    c = MongoClient(
        self.seed,
        replicaSet=self.name,
        serverSelectionTimeoutMS=self.server_selection_timeout)
    db = c.get_database(
        "pymongo_test",
        write_concern=WriteConcern(w=len(c.secondaries) + 1))
    db.test.insert_one({})
    find_one = db.test.find_one
    # Record the current topology before adding new members.
    primary = ha_tools.get_primary()
    secondary1 = ha_tools.get_random_secondary()
    new_hosts = []
    for i in range(3):
        new_hosts.append(ha_tools.add_member())
        # RS closes all connections after reconfig.
        for j in xrange(30):
            try:
                if ha_tools.get_primary():
                    break
            except (ConnectionFailure, OperationFailure):
                # NOTE(review): the except body (and the rest of this
                # test) is truncated in this chunk — presumably the
                # original sleeps and retries here. Confirm against the
                # full file before editing.
def test_errors(self):
    # Exercise the legacy getlasterror/getpreverror database helpers.
    # We must call getlasterror, etc. on same socket as last operation.
    # maxPoolSize=1 guarantees all operations share one socket.
    db = rs_or_single_client(maxPoolSize=1).pymongo_test
    db.reset_error_history()
    # No errors yet after resetting history.
    self.assertEqual(None, db.error())
    if client_context.supports_getpreverror:
        self.assertEqual(None, db.previous_error())
    db.test.insert_one({"_id": 1})
    # Re-inserting the same _id with w=0 produces a duplicate-key error
    # that the server records for this connection.
    unacked = db.test.with_options(write_concern=WriteConcern(w=0))
    unacked.insert_one({"_id": 1})
    self.assertTrue(db.error())
    if client_context.supports_getpreverror:
        self.assertTrue(db.previous_error())
    unacked.insert_one({"_id": 1})
    self.assertTrue(db.error())
    if client_context.supports_getpreverror:
        prev_error = db.previous_error()
        # nPrev == 1: the previous error was the immediately preceding op.
        self.assertEqual(prev_error["nPrev"], 1)
        # Strip fields that differ between error() and previous_error().
        del prev_error["nPrev"]
        prev_error.pop("lastOp", None)
        error = db.error()
        error.pop("lastOp", None)
        # NOTE(review): this chunk appears truncated — the original
        # presumably compares prev_error with error after this point.
def setUpClass(cls):
    """Build the shared ReadPrefTester client and seed a test collection."""
    super(TestCommandAndReadPreference, cls).setUpClass()
    # A huge localThresholdMS takes round-trip latency out of server
    # selection, so only the ReadPreference mode decides which member
    # is chosen.
    cls.c = ReadPrefTester(
        client_context.pair,
        replicaSet=cls.name,
        localThresholdMS=1000 * 1000)
    if client_context.auth_enabled:
        cls.c.admin.authenticate(db_user, db_pwd)
    cls.client_version = Version.from_client(cls.c)
    # mapReduce and group fail against a missing collection, so ensure
    # 'test' exists by inserting one document with w=cls.w.
    seed_coll = cls.c.pymongo_test.get_collection(
        'test', write_concern=WriteConcern(w=cls.w))
    seed_coll.insert_one({})
def insert_command_default_write_concern():
    # Run the raw 'insert' command with a server-default (empty)
    # write concern. `collection` comes from the enclosing scope,
    # which is outside this chunk.
    collection.database.command(
        'insert', 'collection', documents=[{}],
        write_concern=WriteConcern())
def setUp(self):
    """Drop and repopulate pymongo_test.test with ten documents."""
    super(TestReadPreferencesBase, self).setUp()
    # Fresh, known data lets read_from_which_host iterate cursors over
    # a predictable collection.
    self.client.pymongo_test.test.drop()
    docs = [{'_id': i} for i in range(10)]
    # Write with w=self.w so every member has the data before any
    # secondary reads happen.
    db = self.client.get_database(
        "pymongo_test", write_concern=WriteConcern(w=self.w))
    db.test.insert_many(docs)
    self.addCleanup(self.client.pymongo_test.test.drop)
# NOTE(review): headerless interior of a setUp-style method — the
# enclosing `def` (and the definition of `secondaries`) is outside
# this chunk.
# Locate the member tagged 'other_secondary'; unpacking asserts there
# is exactly one such member.
(other_secondary, ) = [
    s for s in secondaries
    if ha_tools.get_tags(s)['name'] == 'other_secondary']
self.other_secondary = partition_node(other_secondary)
self.other_secondary_tags = ha_tools.get_tags(other_secondary)
self.other_secondary_dc = {'dc': self.other_secondary_tags['dc']}
self.c = MongoClient(
    self.seed,
    replicaSet=self.name,
    serverSelectionTimeoutMS=self.server_selection_timeout)
# w = all data-bearing members, so writes replicate everywhere.
self.w = len(self.c.secondaries) + 1
self.db = self.c.get_database("pymongo_test",
                              write_concern=WriteConcern(w=self.w))
self.db.test.delete_many({})
self.db.test.insert_many([{'foo': i} for i in xrange(10)])
self.clear_ping_times()
# NOTE(review): fragment of collection index creation — the `if`
# guarding this raise (collation requested on a pre-3.4 server) is
# outside this chunk.
    raise ConfigurationError(
        'Must be connected to MongoDB 3.4+ to use collations.')
else:
    index['collation'] = collation
cmd = SON([('createIndexes', self.name), ('indexes', [index])])
try:
    self._command(
        sock_info, cmd, read_preference=ReadPreference.PRIMARY,
        codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
        write_concern=self.write_concern,
        parse_write_concern_error=True)
except OperationFailure as exc:
    if exc.code in common.COMMAND_NOT_FOUND_CODES:
        # Server predates the createIndexes command; fall back to the
        # legacy path of inserting into system.indexes directly.
        index["ns"] = self.__full_name
        # Legacy index insert must be acknowledged even when this
        # collection's write concern is unacknowledged.
        wcn = (self.write_concern if
               self.write_concern.acknowledged else WriteConcern())
        self.__database.system.indexes._insert(
            sock_info, index, True, False, False, wcn)
    else:
        raise
def _finish_transaction(self, command_name, retrying):
    """Run commitTransaction or abortTransaction on the admin database.

    :Parameters:
      - `command_name`: "commitTransaction" or "abortTransaction".
      - `retrying`: True when this is not the first attempt.
    """
    txn = self._transaction
    write_concern = txn.opts.write_concern
    command = SON([(command_name, 1)])
    if command_name == "commitTransaction":
        max_ms = txn.opts.max_commit_time_ms
        if max_ms:
            command['maxTimeMS'] = max_ms
        # Transaction spec says that after the initial commit attempt,
        # subsequent commitTransaction commands should be upgraded to
        # use w:"majority" and default wtimeout to 10 seconds.
        if retrying:
            doc = write_concern.document
            doc["w"] = "majority"
            doc.setdefault("wtimeout", 10000)
            write_concern = WriteConcern(**doc)
    if txn.recovery_token:
        command['recoveryToken'] = txn.recovery_token
    with self._client._socket_for_writes(self) as sock_info:
        return self._client.admin._command(
            sock_info,
            command,
            session=self,
            write_concern=write_concern,
            parse_write_concern_error=True)
# NOTE(review): duplicate fragment of collection index creation — the
# `if` guarding this raise (collation requested on a pre-3.4 server)
# is outside this chunk.
    raise ConfigurationError(
        'Must be connected to MongoDB 3.4+ to use collations.')
else:
    index['collation'] = collation
cmd = SON([('createIndexes', self.name), ('indexes', [index])])
try:
    self._command(
        sock_info, cmd, read_preference=ReadPreference.PRIMARY,
        codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
        write_concern=self.write_concern,
        parse_write_concern_error=True)
except OperationFailure as exc:
    if exc.code in common.COMMAND_NOT_FOUND_CODES:
        # Server predates the createIndexes command; fall back to the
        # legacy path of inserting into system.indexes directly.
        index["ns"] = self.__full_name
        # Legacy index insert must be acknowledged even when this
        # collection's write concern is unacknowledged.
        wcn = (self.write_concern if
               self.write_concern.acknowledged else WriteConcern())
        self.__database.system.indexes._insert(
            sock_info, index, True, False, False, wcn)
    else:
        raise