    def __init__(self, **kw):
        super(ThreadProfile, self).__init__(**kw)
        # Keep a backup of the local trace callback so _enable can restore
        # it after _disable nulls it.
        self._local_trace_backup = self._local_trace

    def _enable(self):
        # Restore the per-thread local trace and register the global trace
        # hook so threads started after enabling are profiled too.
        self._local_trace = self._local_trace_backup
        threading.settrace(self._global_trace)
        super(ThreadProfile, self)._enable()

    def _disable(self):
        # Disable base profiling first, then unregister the thread trace
        # hook and drop the local trace callback.
        super(ThreadProfile, self)._disable()
        threading.settrace(None)
        self._local_trace = None
class StatisticProfile(ProfileBase, ProfileRunnerBase):
    """
    Statistical profiling class.

    This class does not gather samples by itself.
    Instead, it must be provided with call stacks (as returned by
    sys._getframe() or sys._current_frames()).
    """
    def __init__(self):
        super(StatisticProfile, self).__init__()
        self.total_time = 1

    def sample(self, frame):
        # Attribute one hit to the line currently being executed in the
        # sampled frame.
        getFileTiming = self._getFileTiming
        called_timing = getFileTiming(frame)
        called_code = frame.f_code
        called_timing.hit(called_code, frame.f_lineno, 0)
    def runmodule(self, module, argv):
        # Run a module as __main__ under the profiler, temporarily
        # adjusting sys.argv and sys.path[0] the way "python -m" does,
        # and restoring both afterwards.
        original_sys_argv = list(sys.argv)
        original_sys_path0 = sys.path[0]
        try:
            sys.path[0] = os.getcwd()
            sys.argv[:] = argv
            with self():
                runpy.run_module(module, run_name='__main__', alter_sys=True)
        finally:
            sys.argv[:] = original_sys_argv
            sys.path[0] = original_sys_path0
        return self
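
# Usage sketch (not part of the original module; the helper name, the
# "prof" parameter and the timing constants below are illustrative only):
# StatisticProfile does not capture stacks itself, so a driving loop has
# to snapshot frames, e.g. via sys._current_frames(), and feed them to
# sample().
def _example_statistic_sampling(prof, duration=1.0, period=0.01):
    import sys
    import time
    deadline = time.time() + duration
    while time.time() < deadline:
        # One hit is attributed to the currently executing line of each
        # thread's topmost frame.
        for frame in sys._current_frames().values():
            prof.sample(frame)
        time.sleep(period)
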
class Profile(ProfileBase, ProfileRunnerBase):
    """
    Deterministic, recursive, line-granularity profiling class.

    Does not require any source code change to work.
    If the performance hit is too large, it can benefit from some
    integration (calling enable/disable around selected code chunks).

    The sum of time spent in all profiled lines is less than the total
    profiled time reported. This is (part of) profiling overhead.
    This also means that the sum of time-spent-on-line percentages is
    less than 100%.

    All times are "internal time", i.e. they do not include time spent
    inside called (profilable, so Python) functions.
    """
    __slots__ = (
        '_global_trace',