Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment — the enclosing reader function's ``def`` and the
# loop that sets ``key`` are outside this view; indentation reconstructed.
# Store one user attribute decoded from the XDR stream.
value = _unpack_string(data)
ah_stats.usrattr[key] = value
# unpack data using dtype from record info
if dtype == 1:
    # float
    temp = data.unpack_farray(ndata, data.unpack_float)
elif dtype == 6:
    # double
    temp = data.unpack_farray(ndata, data.unpack_double)
else:
    # e.g. 3 (vector), 2 (complex), 4 (tensor)
    msg = 'Unsupported AH v2 record type %d'
    raise NotImplementedError(msg % (dtype))
# Wrap the samples in a Trace and copy timing/station metadata from the
# parsed AH header onto the ObsPy stats object.
tr = Trace(np.array(temp))
tr.stats.ah = ah_stats
tr.stats.delta = ah_stats.record.delta
tr.stats.starttime = ah_stats.record.start_time
tr.stats.station = ah_stats.station.code
tr.stats.channel = ah_stats.station.channel
return tr
def save_trace(header, average, filename):
    """Append *average* as a single Trace to an H5 file.

    :param header: Stats-like header object; its ``npts`` is updated to
        match the length of *average* before writing.
    :param average: sample array to store as the trace data.
    :param filename: destination file, opened in append mode.
    """
    header.npts = len(average)
    Trace(data=average, header=header).write(filename, 'H5', mode='a')
# NOTE(review): fragment — the enclosing AH v1 reader function's ``def`` is
# outside this view; indentation reconstructed.
# extras
# Read the variable-length array of extra float header values.
ah_stats.extras = data.unpack_array(data.unpack_float)
# unpack data using dtype from record info
if dtype == 1:
    # float
    temp = data.unpack_farray(ndata, data.unpack_float)
elif dtype == 6:
    # double
    temp = data.unpack_farray(ndata, data.unpack_double)
else:
    # e.g. 3 (vector), 2 (complex), 4 (tensor)
    msg = 'Unsupported AH v1 record type %d'
    raise NotImplementedError(msg % (dtype))
# Wrap the samples in a Trace and copy timing/station metadata from the
# parsed AH header onto the ObsPy stats object.
tr = Trace(np.array(temp))
tr.stats.ah = ah_stats
tr.stats.delta = ah_stats.record.delta
tr.stats.starttime = ah_stats.record.start_time
tr.stats.station = ah_stats.station.code
tr.stats.channel = ah_stats.station.channel
return tr
# NOTE(review): fragment — tail of a key-dispatch chain and of the record
# loop of an SH/Q file reader; the opening ``if`` branches and the loop
# header are outside this view. Indentation reconstructed.
header['sh']['ORIGIN'] = to_utcdatetime(value)
elif key:
    # Map vendor header names back to canonical SH keys; unknown keys pass
    # through unchanged.
    key = INVERTED_SH_IDX.get(key, key)
    # Coerce the raw string value according to the key's declared type.
    if key in SH_KEYS_INT:
        header['sh'][key] = int(value)
    elif key in SH_KEYS_FLOAT:
        header['sh'][key] = float(value)
    else:
        header['sh'][key] = value
# set channel code
header['channel'] = ''.join(channel)
# remember record number
header['sh']['RECNO'] = len(stream) + 1
if headonly:
    # skip data
    stream.append(Trace(header=header))
else:
    if not npts:
        # empty record: keep the header but attach no samples
        stream.append(Trace(header=header))
        continue
    # read data (4 bytes per float32 sample)
    data = fh_data.read(npts * 4)
    dtype = byteorder + 'f4'
    # NOTE(review): np.fromstring is deprecated in modern NumPy;
    # np.frombuffer is the drop-in replacement — confirm before changing.
    data = np.fromstring(data, dtype=dtype)
    # convert to system byte order
    data = np.require(data, '=f4')
    stream.append(Trace(data=data, header=header))
if not headonly:
    # the data file handle was only opened when samples were read
    fh_data.close()
return stream
def __init__(self, **kwargs):
    """
    Checks some variables and maps the kwargs to class variables.

    Accepts ``stream`` (a Stream or single Trace — a Trace is wrapped into
    a one-element Stream), plus optional ``type``, ``starttime``,
    ``endtime`` and ``fig`` keyword arguments.

    :raises TypeError: if ``stream`` is neither a Stream nor a Trace.
    :raises IndexError: if the Stream contains no traces.

    NOTE(review): the body appears truncated here — the trailing comment
    announces min/max time handling that is outside this view.
    """
    self.kwargs = kwargs
    self.stream = kwargs.get('stream')
    # Check if it is a Stream or a Trace object.
    if isinstance(self.stream, Trace):
        self.stream = Stream([self.stream])
    elif not isinstance(self.stream, Stream):
        msg = 'Plotting is only supported for Stream or Trace objects.'
        raise TypeError(msg)
    # Stream object should contain at least one Trace
    if len(self.stream) < 1:
        msg = "Empty stream object"
        raise IndexError(msg)
    # Work on a copy so plotting never mutates the caller's stream.
    self.stream = self.stream.copy()
    # Type of the plot.
    self.type = kwargs.get('type', 'normal')
    # Start and end times of the plots.
    self.starttime = kwargs.get('starttime', None)
    self.endtime = kwargs.get('endtime', None)
    self.fig_obj = kwargs.get('fig', None)
    # If no times are given take the min/max values from the stream object.
def _internal_process(st, lowcut, highcut, filt_order, sampling_rate,
                      first_length, stachan, debug, i=0):
    # NOTE(review): fragment — the function continues past this view: the
    # multi-channel ``msg`` below is built but not (yet) raised or returned
    # here, and ``i`` is unused in the visible portion.
    # Pick the single trace matching this (station, channel) pair.
    tr = st.select(station=stachan[0], channel=stachan[1])
    if len(tr) == 0:
        # Channel missing entirely: substitute a zero-filled trace so the
        # design stream keeps a consistent channel layout.
        tr = Trace(np.zeros(int(first_length * sampling_rate)))
        tr.stats.station = stachan[0]
        tr.stats.channel = stachan[1]
        tr.stats.sampling_rate = sampling_rate
        tr.stats.starttime = st[0].stats.starttime  # Do this to make more
        # sensible plots
        warnings.warn('Padding stream with zero trace for ' +
                      'station ' + stachan[0] + '.' + stachan[1])
    elif len(tr) == 1:
        # Exactly one match: detrend then run the standard pre-processing.
        tr = tr[0]
        tr.detrend('simple')
        tr = pre_processing.process(
            tr=tr, lowcut=lowcut, highcut=highcut, filt_order=filt_order,
            samp_rate=sampling_rate, debug=debug, seisan_chan_names=False)
    else:
        # More than one trace for the pair is ambiguous.
        msg = ('Multiple channels for ' + stachan[0] + '.' +
               stachan[1] + ' in a single design stream.')
# NOTE(review): fragment — tail of the record loop of an SH/Q file reader;
# the loop header and the key-parsing code are outside this view.
# Indentation reconstructed.
# remember record number
header['sh']['RECNO'] = len(stream) + 1
if headonly:
    # skip data
    stream.append(Trace(header=header))
else:
    if not npts:
        # empty record: keep the header but attach no samples
        stream.append(Trace(header=header))
        continue
    # read data (4 bytes per float32 sample)
    data = fh_data.read(npts * 4)
    dtype = byteorder + 'f4'
    # NOTE(review): np.fromstring is deprecated in modern NumPy;
    # np.frombuffer is the drop-in replacement — confirm before changing.
    data = np.fromstring(data, dtype=dtype)
    # convert to system byte order
    data = np.require(data, '=f4')
    stream.append(Trace(data=data, header=header))
if not headonly:
    # the data file handle was only opened when samples were read
    fh_data.close()
return stream
# NOTE(review): fragment — the ``def`` line and the head of this docstring
# are outside this view; indentation reconstructed.
:rtype: :class:`~obspy.core.stream.Stream`
:returns: Stream object containing header and data.

.. rubric:: Example

>>> from obspy import read
>>> st = read("/path/to/loc_RJOB20050831023349.z")
"""
traces = []
with open(filename, 'rb') as f:
    # reading multiple gse2 parts
    while True:
        try:
            if headonly:
                # header only: build a data-less Trace
                header = libgse2.read_header(f)
                traces.append(Trace(header=header))
            else:
                header, data = libgse2.read(f, verify_chksum=verify_chksum)
                traces.append(Trace(header=header, data=data))
        except EOFError:
            # libgse2 signals the end of the file via EOFError
            break
return Stream(traces=traces)
# NOTE(review): fragment — tail of a copy/deepcopy helper whose ``def`` and
# the construction of ``ad`` begin outside this view. It deep-copies every
# attribute except the (potentially large) 'unpacked_data' and the 'endian'
# marker, which is passed separately.
    unpacked_header_endian=deepcopy(self.__dict__['endian']),
    data=dict((k, deepcopy(v)) for k, v in self.__dict__.items()
              if k not in ('unpacked_data', 'endian')))
return ad
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod(exclude_empty=True)

# Monkey patch the __str__ method for all Trace instances used in the
# following.
# XXX: Check if this is not messing anything up. Patching every single
# instance did not reliably work.
# Keep the original formatter reachable, then replace __str__ class-wide
# with the SEG-Y specific one.
setattr(Trace, '__original_str__', Trace.__str__)
setattr(Trace, '__str__', _segy_trace_str_)
# NOTE(review): Python 2 fragment (``print`` statement below) — the
# enclosing function and the even-length branch of this if/else start
# outside this view. Indentation reconstructed.
correlation_temp[midsample:] = correlation[0:midsample]
else:
    # Odd number of samples: swap the two halves around the centre sample.
    midsample = int(n/2)
    print 'correlation length odd'
    correlation_temp[0:midsample] = correlation[midsample+1:]
    correlation_temp[midsample+1:] = correlation[0:midsample]
# Test purposes
np.save(inp.outdir+'test_G1_f0.npy',G1_f0)
np.save(inp.outdir+'test_G2_f0.npy',G2_f0)
correlation = correlation_temp
# Determine the distance in meters between the two receivers
# NOTE(review): gps2DistAzimuth is the legacy ObsPy name; newer releases
# expose obspy.geodetics.gps2dist_azimuth — confirm the ObsPy version.
dist_meters = gps2DistAzimuth(lat1,lon1,lat2,lon2)[0]
# Wrap the correlation in a Trace and attach SAC-style metadata: station
# coordinates from receiver 1, event fields reused for receiver 2.
corr = Trace()
corr.stats=test_seism.stats
corr.data = correlation
corr.stats.sac={}
corr.stats.sac['dist'] = dist_meters
corr.stats.sac['stla'] = lat1
corr.stats.sac['stlo'] = lon1
corr.stats.sac['evla'] = lat2
corr.stats.sac['evlo'] = lon2
# Second copy carrying the test correlation for comparison.
corr_test = corr.copy()
corr_test.data = correlation_test
return corr, K, corr_test