if skip:
    # if skip is set, only one trace is read; everything else makes
    # no sense.
    break
continue
elif line[0].isalpha():
# header entry
key, value = line.split(':', 1)
key = key.strip()
value = value.strip()
headers[key] = value
elif not headonly:
# data entry - may be written in multiple columns
data.write(line.strip() + ' ')
fh.close()
# create ObsPy stream object
stream = Stream()
# custom header
custom_header = {}
if delta:
custom_header["delta"] = delta
if length:
custom_header["npts"] = length
for headers, data in channels:
# create Stats
header = Stats(custom_header)
header['sh'] = {}
channel = [' ', ' ', ' ']
# generate headers
for key, value in headers.items():
if key == 'DELTA':
header['delta'] = float(value)
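# Usage sketch for the SH ASC parsing above via ObsPy's high-level reader
# (the file name is a placeholder):
from obspy import read

st_asc = read("example.ASC", format="SH_ASC")
print(st_asc[0].stats.delta)  # the DELTA header parsed above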
:type stations: list of str
:param stations: a list of station names, in the format NET.STA.
:type comps: list of str
:param comps: a list of component codes among Z, N, E, 1, 2.
:type goal_day: str
:param goal_day: the day of data to load, ISO 8601 format: e.g. 2016-12-31.
:type params: class
:param params: an object containing the config parameters, as obtained by
:func:`msnoise.api.get_params`.
:type responses: :class:`pandas.DataFrame`
:param responses: a DataFrame containing the instrument responses, as
obtained by :func:`msnoise.api.preload_instrument_responses`.
:rtype: :class:`obspy.core.stream.Stream`
:return: A Stream object containing all traces.
"""
datafiles = {}
output = Stream()
MULTIPLEX = False
MULTIPLEX_files = {}
for station in stations:
datafiles[station] = {}
net, sta = station.split('.')
gd = datetime.datetime.strptime(goal_day, '%Y-%m-%d')
files = get_data_availability(
db, net=net, sta=sta, starttime=gd, endtime=gd)
for comp in comps:
datafiles[station][comp] = []
for file in files:
if file.sta != "MULTIPLEX":
if file.comp[-1] not in comps:
continue
fullpath = os.path.join(file.path, file.file)
datafiles[station][file.comp[-1]].append(fullpath)
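# Hypothetical call of the function documented above (the helper names come
# from the docstring; the station list, components and day are placeholders):
from msnoise.api import connect, get_params, preload_instrument_responses

db = connect()
params = get_params(db)
responses = preload_instrument_responses(db)
st_day = preprocess(db, ["BE.MEM", "BE.UCC"], ["Z", "N", "E"],
                    "2016-12-31", params, responses)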
preprocess(stream, day, inventory,  # call head reconstructed; the
           **preprocessing_kwargs)  # argument names are assumptions
# collect trace pairs for correlation
next_day = day + 24 * 3600
stations = sorted({tr.id[:-1] for tr in stream})
tasks = []
for station1, station2 in itertools.combinations_with_replacement(
stations, 2):
if only_auto_correlation and station1 != station2:
continue
if station_combinations and not any(set(station_comb.split('-')) == (
{station1.rsplit('.', 2)[0], station2.rsplit('.', 2)[0]}
if '.' in (station_comb) else
{station1.split('.')[1], station2.split('.')[1]})
for station_comb in station_combinations):
continue
stream1 = Stream([tr for tr in stream if tr.id[:-1] == station1])
stream2 = Stream([tr for tr in stream if tr.id[:-1] == station2])
datetime1 = _midtime(stream1[0].stats)
datetime2 = _midtime(stream2[0].stats)
msg = 'Cannot get coordinates for channel %s datetime %s'
try:
c1 = inventory.get_coordinates(stream1[0].id, datetime=datetime1)
except Exception as ex:
raise RuntimeError(msg % (stream1[0].id, datetime1)) from ex
try:
c2 = inventory.get_coordinates(stream2[0].id, datetime=datetime2)
except Exception as ex:
raise RuntimeError(msg % (stream2[0].id, datetime2)) from ex
args = (c1['latitude'], c1['longitude'],
c2['latitude'], c2['longitude'])
dist, azi, baz = gps2dist_azimuth(*args)
if ('R' in components or 'T' in components) and station1 != station2:
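# gps2dist_azimuth from obspy.geodetics, as used above, returns the
# great-circle distance in meters plus azimuth and back azimuth in degrees.
# Standalone sketch with placeholder coordinates:
from obspy.geodetics import gps2dist_azimuth

dist_m, azi_deg, baz_deg = gps2dist_azimuth(50.0, 4.0, 48.0, 11.0)
print(dist_m / 1000.0, azi_deg, baz_deg)  # km, degrees, degrees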
def __plotStraight(self, trace, ax, *args, **kwargs): # @UnusedVariable
"""
Just plots the data samples in self.stream. Useful for smaller
datasets of up to around 1,000,000 samples (depending on the machine
it runs on). For larger datasets it is slow and memory hungry.
"""
# Copy to avoid any changes to original data.
trace = [tr.copy() for tr in trace]
if len(trace) > 1:
stream = Stream(traces=trace)
# Merge with 'interpolation'. In case of overlaps this method will
# always use the longest available trace.
if hasattr(trace[0].stats, 'preview') and trace[0].stats.preview:
stream = Stream(traces=stream)
stream = mergePreviews(stream)
else:
stream.merge(method=1)
trace = stream[0]
else:
trace = trace[0]
# Check if it is a preview file and adjust accordingly.
# XXX: Will look weird if the preview file is too small.
if hasattr(trace.stats, 'preview') and trace.stats.preview:
# Mask the gaps.
trace.data = np.ma.masked_array(trace.data)
trace.data[trace.data == -1] = np.ma.masked
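# Standalone sketch of the numpy.ma gap-masking pattern used above: gap
# samples (stored as -1 in preview data) are masked so plotting routines
# skip them. The demo array is a placeholder.
import numpy as np

demo = np.ma.masked_array(np.array([3, 7, -1, -1, 5]))
demo[demo == -1] = np.ma.masked
print(demo)  # [3 7 -- -- 5]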
allocData = C.CFUNCTYPE(C.c_long, C.c_int, C.c_char)(allocate_data)
lil = clibmseed.readMSEEDBuffer(buffer, buflen, selections, unpack_data,
reclen, 0, allocData)
# XXX: Check if the freeing works.
del selections
traces = []
try:
currentID = lil.contents
# Return an empty stream if no traces are found.
except ValueError:
clibmseed.lil_free(lil)
del lil
return Stream()
while True:
# Init header with the essential information.
header = {'network': currentID.network.strip(),
'station': currentID.station.strip(),
'location': currentID.location.strip(),
'channel': currentID.channel.strip(),
'mseed': {'dataquality': currentID.dataquality}}
# Loop over segments.
try:
currentSegment = currentID.firstSegment.contents
except ValueError:
break
while True:
header['sampling_rate'] = currentSegment.samprate
header['starttime'] = \
    util._convertMSTimeToDatetime(currentSegment.starttime)
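# The ctypes linked-list walk above is the low-level path; the high-level
# entry point accepts the same buffer through a file-like object. Minimal
# sketch (the file name is a placeholder):
import io
from obspy import read

with open("example.mseed", "rb") as f:
    st_ms = read(io.BytesIO(f.read()), format="MSEED")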
else:
noise = generate_signal_noise2(1000, 0.05)
signal = generate_signal_expSin(300, 0.005, 0.5, noise,
0.5, 500, 0.05, 1)
# sampling interval
# TODO: fix code below
# if 'data' in locals():
#     delta = data[0].stats.delta
# else:
delta = 0.01
tr = Trace(signal)
tr.stats.delta = delta
tr.stats.channel = 'HHZ'
st = Stream(tr)
frequencies = make_LinFq(1, 40, delta, 8)
CN_HP, CN_LP = rec_filter_coeff(frequencies, delta)
filter_norm = rec_filter_norm(frequencies, delta, CN_HP, CN_LP, 2)
YN, CF, Tn, Nb = MBfilter_CF(st, frequencies, CN_HP, CN_LP, filter_norm,
var_w=False, CF_type='kurtosis',
CF_decay_win=0.1)
fig1 = plt.figure()
fig2 = plt.figure()
max2 = CF.max()
for n in range(Nb):
ax1 = fig1.add_subplot(Nb+1, 1, n+1)
ax2 = fig2.add_subplot(Nb+1, 1, n+1)
# ax2.set_ylim((0, max2))
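# MBfilter_CF above builds a characteristic function (CF) per frequency
# band using kurtosis. A simplified, single-band sketch of the idea; the
# window length and test signal are placeholders, not the original
# implementation:
import numpy as np
from scipy.stats import kurtosis

def sliding_kurtosis(x, win):
    # kurtosis over a trailing window; high values flag impulsive onsets
    cf = np.zeros(len(x))
    for i in range(win, len(x) + 1):
        cf[i - 1] = kurtosis(x[i - win:i])
    return cf

cf_demo = sliding_kurtosis(np.random.randn(2000), win=100)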
# SAC alpha
if args.asc != "None":
    # acquire resp, PAZ and station files
    if args.Resp != "None":
        resfiles = findPazFiles(args.Resp, 1)
    if args.Paz != "None":
        pazfiles = findPazFiles(args.Paz, 2)
    if args.stsC != "None":
        stations = args.stsC
    # associate
    asc = initStats(asc, args)
    asc = associateNamesFilesToTraces(asc, resfiles, pazfiles, stations)
# Combine all streams (avoid shadowing the builtin all())
combined = Stream()
if args.fseed != "None":
    combined += fse
if args.mseed != "None":
    combined += mse
if args.sac != "None":
    combined += sac
if args.asc != "None":
    combined += asc
return combined
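# The combined Stream behaves like any other Stream: it can be sorted and
# written in one call (the output file name is a placeholder):
combined.sort(keys=["starttime"])
combined.write("combined.mseed", format="MSEED")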
t = UTCDateTime("%s-%s-%sT%s" % (year, month, day, time))
sampling_rate = 1.0 / float(items[6][1].decode())
dtype = items[1][1].decode()
if dtype.upper() == "LONG":
data = from_buffer(data, dtype=np.int16)
elif dtype.upper() == "SHORT":
data = from_buffer(data, dtype=np.int8)
else:
raise NotImplementedError()
tr = Trace(data=data)
tr.stats.starttime = t
tr.stats.sampling_rate = sampling_rate
tr.stats._format = "PDAS"
tr.stats.pdas = extra_headers
st = Stream(traces=[tr])
return st
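# The PDAS reader is registered with ObsPy's plugin system, so the usual
# entry point is obspy.read (the file name is a placeholder):
from obspy import read

st_pdas = read("example.pdas", format="PDAS")
print(st_pdas[0].stats.pdas)  # the extra PDAS header fields set above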
def _makeOpStream(self, ind, row, traceLimit):
"""
Make an ObsPy Stream of the multiplexed data stored in the main detex
DataFrame.
"""
st = obspy.core.Stream()
count = 0
if 'AlignedTD' in row: # if this is a subspace
for key in row.Events:
if count < traceLimit:
tr = obspy.core.Trace(data=row.AlignedTD[key])
tr.stats.channel = key
tr.stats.network = row.Name
tr.stats.station = row.Station
st += tr
count += 1
return st
else: # if this is a single event
for key in row.Events:
    tr = obspy.core.Trace(data=row.MPtd[key])
    tr.stats.channel = key
    tr.stats.station = row.Station
    st += tr
return st