def test_positive_gradient(self):
    from OP_waveforms import stream_positive_derivative
    from obspy.core import read

    base_path = self.wo.opdict['base_path']
    test_datadir = self.wo.opdict['test_datadir']
    st = read(os.path.join(base_path, test_datadir, 'raw_data',
                           'YA.UV15.00.HHZ.MSEED'))
    tr = st[0]
    npts = len(tr.data)
    dt = tr.stats.delta
    x = np.arange(npts) * dt
    # set up a polynomial function and its analytic derivative
    y = 3 + 2 * x + 4 * x * x + 5 * x * x * x
    dy_exp = 2 + 8 * x + 15 * x * x
    tr.data = y
    st = stream_positive_derivative(st)
    np.testing.assert_almost_equal(tr.data[20:100], dy_exp[20:100], 2)
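# --- Aside: a minimal sketch of what stream_positive_derivative might do,
# consistent with the assertion above (an assumption; the real
# implementation lives in OP_waveforms): differentiate each trace in
# place and keep only the positive part of the gradient.
def stream_positive_derivative_sketch(st):  # hypothetical name
    for tr in st:
        grad = np.gradient(tr.data, tr.stats.delta)  # d(data)/dt
        tr.data = np.clip(grad, 0.0, None)           # keep positive part
    return st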
    if file.comp[-1] not in comps:
        continue
    fullpath = os.path.join(file.path, file.file)
    datafiles[station][file.comp[-1]].append(fullpath)
else:
    MULTIPLEX = True
    print("Multiplex mode, reading the files")
    fullpath = os.path.join(file.path, file.file)
    multiplexed = sorted(glob.glob(fullpath))
    for comp in comps:
        for fn in multiplexed:
            if fn in MULTIPLEX_files:
                _ = MULTIPLEX_files[fn]
            else:
                # print("Reading %s" % fn)
                _ = read(fn, format=params.archive_format or None)
                traces = []
                for tr in _:
                    if ("%s.%s" % (tr.stats.network, tr.stats.station) in stations
                            and tr.stats.channel[-1] in comps):
                        traces.append(tr)
                del _
                _ = Stream(traces=traces)
                MULTIPLEX_files[fn] = _
            datafiles[station][comp].append(_)

for istation, station in enumerate(stations):
    net, sta = station.split(".")
    for comp in comps:
        files = datafiles[station][comp]  # direct dict lookup instead of eval()
        if len(files) != 0:
            logger.debug("%s.%s Reading %i Files" %
                         (station, comp, len(files)))
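# --- Aside (a hypothetical continuation, not the original code): once the
# per-station/component lists are filled, they can be turned into a single
# merged Stream. Entries are already Stream objects in MULTIPLEX mode and
# file paths otherwise.
            stream = Stream()
            for f in files:
                if isinstance(f, Stream):
                    stream += f.select(station=sta, channel="*%s" % comp)
                else:
                    stream += read(f)
            stream.merge(method=1, fill_value=0)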
def read(pathname_or_url, *args, **kwargs):  # format=None, headonly=False,
    """
    Read waveform files into a Stream object.

    Doc of obspy.core.read:
    """
    # 'ignore_starttime' is our own keyword; pop it before the call so it
    # does not reach obspy.core.read
    ignore_starttime = kwargs.pop('ignore_starttime', False)
    pathname_or_url, ext = os.path.splitext(pathname_or_url)
    ms = obspy.core.read(pathname_or_url + ext, *args, **kwargs)
    content = []
    hatfiles = glob.glob(pathname_or_url + '.HAT')
    hatfiles.sort()
    for hatfile in hatfiles:
        with open(hatfile, 'r') as file_:
            file_content = file_.read().strip('\n')
        file_content = file_content.replace('nan', 'np.nan')
        if file_content != '':
            content.extend(file_content.split('\n'))
    if len(content) > 0 and len(content) == len(ms):
        for i in range(len(ms)):
            try:
                st = eval(content[i])  # quite slow and, of course, very bad practice
            except NameError as ex:
                if 'masked' in content[i]:
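# --- Aside (not from the original): the eval() above is slow and unsafe.
# Assuming each HAT line is a plain Python literal whose only non-literal
# tokens are the 'np.nan' substitutions made above, a safer sketch uses
# ast.literal_eval with None as a sentinel:
import ast

def parse_hat_line(line):  # hypothetical helper
    # literal_eval rejects names like np.nan, so map them to None first
    values = ast.literal_eval(line.replace('np.nan', 'None'))
    return [np.nan if v is None else v for v in values]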
ofid.write('\nHi, I am rank number %d and I am processing the following files for you:\n' % rank)
for fname in mycontent:
    ofid.write(fname + '\n')

for filepath in mycontent:
    filename = filepath.split('/')[-1]

    if verbose:
        ofid.write('\n========================================================================================\n')
        ofid.write('opening file: ' + filepath + '\n')
        t = time.time() - t0
        ofid.write('Time elapsed since start: ' + str(t) + '\n')

    #- read data
    try:
        data = read(filepath)
    except (TypeError, IOError):
        # both error types get the same treatment: report and skip the file
        if verbose:
            ofid.write('file could not be opened, skip.')
        continue

    #- initialize a stream that is plotted in case check option is set
    if check:
        cstr = Stream()

    #- initialize the stream that recollects the trace segments
    colloc_data = Stream()
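    # --- Aside (hypothetical sketch; the original snippet is truncated
    # here): the per-segment loop the comment above describes would append
    # each processed trace to colloc_data and merge the segments at the end:
    #     for tr in data:
    #         # ... per-trace processing ...
    #         colloc_data += tr
    #     colloc_data.merge(method=1, fill_value=0)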
#- ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#if options.keybindings:
#    PickingGUI()
#    for i in self.dictKeybindings.items():
#        print i
#    return
if options.local:
    # load the local example event: three components (Z/N/E) for each of
    # the five stations
    streams = []
    for sta in ('RJOB', 'RMOA', 'RNON', 'RTBE', 'RWMO'):
        st = read('20091227_105240_Z.%s' % sta)
        st.append(read('20091227_105240_N.%s' % sta)[0])
        st.append(read('20091227_105240_E.%s' % sta)[0])
        streams.append(st)
    #streams = []
    #streams.append(read('RJOB_061005_072159.ehz.new'))
    #streams[0].append(read('RJOB_061005_072159.ehn.new')[0])
    #streams[0].append(read('RJOB_061005_072159.ehe.new')[0])
    #streams.append(read('RNON_160505_000059.ehz.new'))
    #streams.append(read('RMOA_160505_014459.ehz.new'))
    #streams[2].append(read('RMOA_160505_014459.ehn.new')[0])
    #streams[2].append(read('RMOA_160505_014459.ehe.new')[0])
else:
def _getRA(self, ftc, evedf, sta):
    CorDF = pd.DataFrame(index=evedf.Event.values,
                         columns=['Xcor', 'STALTA', 'TimeStamp', 'SampRate',
                                  'MaxCC', 'MaxSTALTA', 'threshold', 'Nc'])
    #CorDF['FilesToCorr']=FilesToCorr
    conStream = self._applyFilter(obspy.core.read(ftc), condat=True)
    if not isinstance(conStream, obspy.core.stream.Stream):
        return None, None
    CorDF['Nc'] = len(list(set([x.stats.channel for x in conStream])))
    CorDF['SampRate'] = conStream[0].stats.sampling_rate
    MPcon, TR = self.multiplex(conStream, evedf.Nc.median(), retTR=True)
    CorDF['TimeStamp'] = min([x.stats.starttime.timestamp for x in TR])
    # get continuous data parameters for Xcor;
    # round the FFT length up to a power of two
    MPconFD = scipy.fftpack.fft(MPcon,
                                n=2**int(evedf.reqlen.median()).bit_length())
    # TODO: this assumes all templates are of equal length; if not, it will break
    n = int(np.median([len(x) for x in evedf.MPtem]))
    a = pd.rolling_mean(MPcon, n)[n - 1:]
    b = pd.rolling_std(MPcon, n)[n - 1:]
    b *= np.sqrt((n - 1.0) / n)
    for corevent, corrow in CorDF.iterrows():
        evrow = evedf[evedf.Event == corevent].iloc[0]
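# --- Aside: pd.rolling_mean and pd.rolling_std used above were removed in
# pandas 0.23. On a modern pandas the same running statistics (sample std,
# ddof=1, matching the old functions) can be computed as:
#     s = pd.Series(MPcon)
#     a = s.rolling(n).mean().values[n - 1:]
#     b = s.rolling(n).std().values[n - 1:]
#     b *= np.sqrt((n - 1.0) / n)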
try:
    (options, args) = parser.parse_args()
    for req in ['-d', '-t', '-i']:
        if not getattr(parser.values, parser.get_option(req).dest):
            parser.print_help()
            return
    :param stream: unfiltered obspy.core.stream.Stream object.
    :param id_: the id of the stream to read.
    :param startdate: the startdate configuration value.
    :param enddate: the enddate configuration value.
    :param goal_sampling_rate: the sampling rate.
    :param logger: the logger instance to use for logging.
    """
    added = 0
    modified = 0
    unchanged = 0
    for basename in files:
        pathname = os.path.join(folder, basename)
        # logger.debug('reading file %s' % pathname)
        try:
            # Note: if format is None or unknown, obspy will use auto-detection.
            stream = obspy.core.read(pathname, headonly=True,
                                     format=archive_format or None)
            for trace_id in set([t.id for t in stream]):
                update_rv = process_stream(db, folder, basename, stream,
                                           trace_id, startdate, enddate,
                                           goal_sampling_rate)
                if update_rv == 1:
                    added += 1
                elif update_rv == -1:
                    modified += 1
                else:
                    unchanged += 1
        except obspy.io.mseed.ObsPyMSEEDFilesizeTooSmallError as e:
            logger.warning("Ignoring possible empty file '%s'."
                           " Got error %s: %s" %
                           (pathname, e.__class__.__name__, str(e)))
        except OSError as e:
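            # hypothetical handler (the original snippet is truncated here),
            # mirroring the MSEED warning above:
            logger.warning("Ignoring unreadable file '%s'. Got error %s: %s" %
                           (pathname, e.__class__.__name__, str(e)))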
#- Create a trace object and fill in the basic information
tr_correlation_stack = Trace()
tr_correlation_stack.stats.sampling_rate = dat1.stats.sampling_rate
tr_correlation_stack.data = correlation_stack

#- open file and write correlation function
fileid_correlation_stack = (outdir + '/stacks/' + fn1 + '.' + fn2 + '.' +
                            corr_type + '.' + corrname + '.' + tformat)

#- linear stack
if os.path.exists(fileid_correlation_stack):
    if verbose:
        print("Correlation stack already exists. Add to previous one.")
    tr_old = read(fileid_correlation_stack)[0]
    tr_correlation_stack.data = tr_correlation_stack.data + tr_old.data
    if tformat == 'SAC':
        tr_correlation_stack.stats = tr_old.stats
        tr_correlation_stack.stats.sac['user0'] += n
        tr_correlation_stack.stats.sac['user1'] += tslen
    tr_correlation_stack.write(fileid_correlation_stack, format=tformat)
else:
    if tformat == 'SAC':
        tr_correlation_stack.stats.sac = {}
    tr_correlation_stack.stats.starttime = UTCDateTime(2000, 1, 1) - maxlag
    tr_correlation_stack.stats.network = dat1.stats.network
    tr_correlation_stack.stats.station = dat2.stats.station
    tr_correlation_stack.stats.channel = dat1.stats.channel
    if tformat == 'SAC':
        tr_correlation_stack.stats.sac['b'] = -maxlag
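    # --- Aside (hypothetical; the snippet is truncated here): the
    # else-branch presumably finishes like the "already exists" branch
    # above, i.e. something like:
    #     tr_correlation_stack.stats.sac['user0'] = n
    #     tr_correlation_stack.stats.sac['user1'] = tslen
    #     tr_correlation_stack.write(fileid_correlation_stack, format=tformat)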