def getRandStr(l):
    # Format string that yields an l-character, zero-padded hex value.
    f = '%0' + str(l) + 'x'
    return f % random.randrange(16 ** l)


if __name__ == '__main__':
    if len(sys.argv) < 5:
        print('Usage: %s RPS requestSize selfHost:port partner1Host:port partner2Host:port ...' % sys.argv[0])
        sys.exit(-1)

    numCommands = int(sys.argv[1])
    cmdSize = int(sys.argv[2])

    selfAddr = sys.argv[3]
    if selfAddr == 'readonly':
        selfAddr = None
    partners = sys.argv[4:]

    maxCommandsQueueSize = int(0.9 * SyncObjConf().commandsQueueSize / len(partners))

    obj = TestObj(selfAddr, partners)
    while obj._getLeader() is None:
        time.sleep(0.5)
    time.sleep(4.0)

    startTime = time.time()
    while time.time() - startTime < 25.0:
        st = time.time()
        for i in range(0, numCommands):
            obj.testMethod(getRandStr(cmdSize), callback=clbck)
            _g_sent += 1
        delta = time.time() - st
def getRandStr(l):
    # Format string that yields an l-character, zero-padded hex value.
    f = '%0' + str(l) + 'x'
    return f % random.randrange(16 ** l)


if __name__ == '__main__':
    if len(sys.argv) < 5:
        print('Usage: %s RPS requestSize selfHost:port partner1Host:port partner2Host:port ...' % sys.argv[0])
        sys.exit(-1)

    numCommands = int(sys.argv[1])
    cmdSize = int(sys.argv[2])

    selfAddr = sys.argv[3]
    if selfAddr == 'readonly':
        selfAddr = None
    partners = sys.argv[4:]

    maxCommandsQueueSize = int(0.9 * SyncObjConf().commandsQueueSize / len(partners))

    obj = TestObj(selfAddr, partners)
    while obj._getLeader() is None:
        time.sleep(0.5)
    time.sleep(4.0)

    startTime = time.time()
    while time.time() - startTime < 25.0:
        st = time.time()
        for i in range(0, numCommands):
            obj.testMethod(getRandStr(cmdSize), time.time(), callback=clbck)
            _g_sent += 1
        delta = time.time() - st
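Both benchmark fragments above assume a TestObj class, a clbck completion callback, and counters (_g_sent, _g_success, _g_error) defined elsewhere in the script. A minimal, self-contained sketch of those missing pieces, assuming pysyncobj's @replicated decorator and FAIL_REASON codes; the optional sentTime parameter covers the second variant, which passes time.time() along with each command:

from pysyncobj import SyncObj, SyncObjConf, replicated, FAIL_REASON

_g_sent = 0
_g_success = 0
_g_error = 0


class TestObj(SyncObj):
    def __init__(self, selfNodeAddr, otherNodeAddrs):
        cfg = SyncObjConf(appendEntriesUseBatch=True)
        super(TestObj, self).__init__(selfNodeAddr, otherNodeAddrs, cfg)

    @replicated
    def testMethod(self, value, sentTime=None):
        # The benchmark measures replication throughput only, so the
        # replicated method can simply discard its payload.
        pass


def clbck(res, err):
    # Called when a command is committed or rejected; tally the outcome.
    global _g_success, _g_error
    if err == FAIL_REASON.SUCCESS:
        _g_success += 1
    else:
        _g_error += 1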
def setUp(self):
    self.conf = SyncObjConf(appendEntriesUseBatch=False, appendEntriesPeriod=0.001, journalFile='foo.journal',
                            raftMinTimeout=0.004, raftMaxTimeout=0.005, autoTickPeriod=0.001)
    callback = Mock()
    callback.replicated = False
    self.so = KVStoreTTL('127.0.0.1:1234', [], self.conf, on_set=callback, on_delete=callback)
    self.so.set_retry_timeout(10)
def setUp(self):
    self.conf = SyncObjConf(appendEntriesUseBatch=False, dynamicMembershipChange=True, autoTick=False)
    self.so = DynMemberSyncObj('127.0.0.1:1234', ['127.0.0.1:1235'], self.conf)
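The setUp above enables dynamicMembershipChange, which is what permits cluster membership to change at runtime. A minimal sketch of adding a node to a running cluster through SyncObj.addNodeToCluster, with arbitrary addresses and a callback whose signature mirrors the one used for replicated commands:

from pysyncobj import SyncObj, SyncObjConf


def on_membership_change(result, error):
    # Fired once the membership change is committed (or rejected).
    print('addNodeToCluster finished: result=%s error=%s' % (result, error))


conf = SyncObjConf(dynamicMembershipChange=True)
node = SyncObj('127.0.0.1:1234', ['127.0.0.1:1235'], conf)
node.addNodeToCluster('127.0.0.1:1236', callback=on_membership_change)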
def setUp(self):
    self.temp_dir = TemporaryDirectory()

    host = '0.0.0.0'
    port = get_free_port()
    seed_addr = None
    conf = SyncObjConf(
        fullDumpFile=self.temp_dir.name + '/supervise.zip',
        logCompactionMinTime=300,
        dynamicMembershipChange=True
    )
    data_dir = self.temp_dir.name + '/supervise'
    grpc_port = get_free_port()
    grpc_max_workers = 10
    http_port = get_free_port()

    logger = getLogger(NAME)
    log_handler = StreamHandler()
    logger.setLevel(ERROR)
    log_handler.setLevel(INFO)
    log_format = Formatter('%(asctime)s - %(levelname)s - %(pathname)s:%(lineno)d - %(message)s')
    log_handler.setFormatter(log_format)
    logger.addHandler(log_handler)

    http_logger = getLogger(NAME + '_http')
def setUp(self):
    self.temp_dir = TemporaryDirectory()
    self.example_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), '../example'))

    host = '0.0.0.0'
    port = get_free_port()
    seed_addr = None
    conf = SyncObjConf(
        fullDumpFile=self.temp_dir.name + '/supervise.zip',
        logCompactionMinTime=300,
        dynamicMembershipChange=True
    )
    data_dir = self.temp_dir.name + '/supervise'
    grpc_port = get_free_port()
    grpc_max_workers = 10
    http_port = get_free_port()

    logger = getLogger(NAME)
    log_handler = StreamHandler()
    logger.setLevel(ERROR)
    log_handler.setLevel(INFO)
    log_format = Formatter('%(asctime)s - %(levelname)s - %(pathname)s:%(lineno)d - %(message)s')
    log_handler.setFormatter(log_format)
    logger.addHandler(log_handler)

    http_logger = getLogger(NAME + '_http')
def __init__(self, selfNodeAddr, otherNodeAddrs,
             testType=TEST_TYPE.DEFAULT,
             compactionMinEntries=0,
             dumpFile=None,
             journalFile=None,
             password=None,
             dynamicMembershipChange=False,
             useFork=True,
             testBindAddr=False):

    cfg = SyncObjConf(autoTick=False, appendEntriesUseBatch=False)
    cfg.appendEntriesPeriod = 0.1
    cfg.raftMinTimeout = 0.5
    cfg.raftMaxTimeout = 1.0
    cfg.dynamicMembershipChange = dynamicMembershipChange
    if testBindAddr:
        cfg.bindAddress = selfNodeAddr
    if dumpFile is not None:
        cfg.fullDumpFile = dumpFile
    if password is not None:
        cfg.password = password
    cfg.useFork = useFork
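This configuration disables autoTick, so nothing advances the Raft state machine in the background; the caller has to tick each instance explicitly. A rough sketch of that pattern, assuming arbitrary local ports and the same _getLeader() polling used in the benchmark fragments above:

import time

from pysyncobj import SyncObj, SyncObjConf

addrs = ['127.0.0.1:4321', '127.0.0.1:4322', '127.0.0.1:4323']
nodes = [SyncObj(a, [o for o in addrs if o != a],
                 SyncObjConf(autoTick=False, appendEntriesUseBatch=False))
         for a in addrs]

# With autoTick=False there is no background thread, so the cluster only
# makes progress when doTick() is called on every node.
deadline = time.time() + 10.0
while time.time() < deadline and nodes[0]._getLeader() is None:
    for n in nodes:
        n.doTick(0.05)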
http_logger.setLevel(INFO)
http_log_handler.setLevel(INFO)
# set http log format
http_handler_format = Formatter('%(message)s')
http_log_handler.setFormatter(http_handler_format)
# add http log handler
http_logger.addHandler(http_log_handler)

# metrics registry
metrics_registry = CollectorRegistry()

# sync config
os.makedirs(os.path.dirname(snapshot_file), exist_ok=True)
conf = SyncObjConf()
conf.fullDumpFile = snapshot_file
conf.logCompactionMinEntries = log_compaction_min_entries
conf.logCompactionMinTime = log_compaction_min_time
conf.dynamicMembershipChange = True
conf.validate()

supervisor = None
try:
    supervisor = Manager(host=host, port=port, seed_addr=peer_addr, conf=conf, data_dir=data_dir,
                         grpc_port=grpc_port, grpc_max_workers=grpc_max_workers, http_port=http_port,
                         logger=logger, http_logger=http_logger, metrics_registry=metrics_registry)

    while True:
        signal.pause()
except Exception as ex:
    print(ex)
finally:
def __init__(self, selfAddress, partnerAddrs, dumpFile):
    conf = SyncObjConf(
        fullDumpFile=dumpFile,
    )
    super(KVStorage, self).__init__(selfAddress, partnerAddrs, conf)
    self.__data = {}
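The KVStorage snippet stops right after initializing its private dict; pysyncobj's key-value storage example typically continues with @replicated write methods and a plain local read. A hedged sketch of that continuation (the set/pop/get names follow the upstream example; only mutations need to go through the Raft log):

from pysyncobj import SyncObj, SyncObjConf, replicated


class KVStorage(SyncObj):
    def __init__(self, selfAddress, partnerAddrs, dumpFile):
        conf = SyncObjConf(fullDumpFile=dumpFile)
        super(KVStorage, self).__init__(selfAddress, partnerAddrs, conf)
        self.__data = {}

    @replicated
    def set(self, key, value):
        # Writes are appended to the replicated log and applied on every node.
        self.__data[key] = value

    @replicated
    def pop(self, key):
        self.__data.pop(key, None)

    def get(self, key):
        # Reads are served from the node's local copy of the state.
        return self.__data.get(key, None)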
def __init__(self, host='localhost', port=7070, peer_addrs=None, conf=SyncObjConf(),
             index_dir='/tmp/cockatrice/index', logger=getLogger(), metrics_registry=CollectorRegistry()):
    self.__logger = logger
    self.__metrics_registry = metrics_registry

    # metrics
    self.__metrics_core_documents = Gauge(
        '{0}_index_core_documents'.format(NAME),
        'The number of documents.',
        [
            'index_name',
        ],
        registry=self.__metrics_registry
    )
    self.__metrics_core_requests_total = Counter(
        '{0}_index_core_requests_total'.format(NAME),
        'The number of requests.',