outty = type(outarr)
outzer = pycbc.types.zeros(len(outarr))
# If we give an output array that is wrong only in length, raise ValueError:
out_badlen = outty(pycbc.types.zeros(len(outarr)+1),dtype=outarr.dtype,**other_args)
args = [inarr, out_badlen]
tc.assertRaises(ValueError, pycbc.fft.fft, *args)
# If we give an output array that has the wrong precision, raise ValueError:
out_badprec = outty(outzer,dtype=_other_prec[dtype(outarr).type],**other_args)
args = [inarr, out_badprec]
tc.assertRaises(ValueError,pycbc.fft.fft,*args)
# If we give an output array that has the wrong kind (real or complex) but
# correct precision, then raise a ValueError. This only makes sense if we try
# to do either C2R or R2R.
out_badkind = outty(outzer,dtype=_bad_dtype[dtype(inarr).type],**other_args)
args = [inarr, out_badkind]
tc.assertRaises(ValueError,pycbc.fft.fft,*args)
# If we give an output array that isn't a PyCBC type, raise TypeError:
out_badtype = numpy.zeros(len(outarr),dtype=outarr.dtype)
args = [inarr, out_badtype]
tc.assertRaises(TypeError,pycbc.fft.fft,*args)
# If we give an input array that isn't a PyCBC type, raise TypeError:
in_badtype = numpy.zeros(len(inarr),dtype=inarr.dtype)
args = [in_badtype, outarr]
tc.assertRaises(TypeError,pycbc.fft.fft,*args)
if dtype(outarr).kind == 'c':
    outarr._data[:] = randn(len(outarr)) + 1j*randn(len(outarr))
    # If we're going to do a HC2R transform we must worry about DC/Nyquist imaginary
    if dtype(inarr).kind == 'f':
        outarr._data[0] = real(outarr[0])
        if (len(inarr) % 2) == 0:
            outarr._data[len(outarr)-1] = real(outarr[len(outarr)-1])
else:
    outarr._data[:] = randn(len(outarr))
inarr.clear()
outcopy = type(outarr)(outarr)
if type(outarr) == pycbc.types.Array:
    outcopy *= len(inarr)
with tc.context:
    pycbc.fft.ifft(outarr, inarr)
    pycbc.fft.fft(inarr, outarr)
emsg = "FFT(IFFT(random)) did not reproduce original array to within tolerance {0}".format(tol)
if isinstance(outcopy, ts) or isinstance(outcopy, fs):
    tc.assertTrue(outcopy.almost_equal_norm(outarr, tol=tol, dtol=tol),
                  msg=emsg)
else:
    tc.assertTrue(outcopy.almost_equal_norm(outarr, tol=tol),
                  msg=emsg)
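# Minimal standalone sketch of the call pattern exercised by the checks above
# (assumes only pycbc and numpy are installed; the lengths and dtypes are
# hypothetical): a real-to-complex forward FFT needs a PyCBC output array of
# length N//2 + 1 with matching precision, otherwise pycbc.fft.fft raises the
# ValueError/TypeError asserted in the tests.
import numpy
import pycbc.fft
from pycbc.types import Array, zeros

N = 16
inarr = Array(numpy.random.randn(N))
outarr = zeros(N // 2 + 1, dtype=numpy.complex128)
pycbc.fft.fft(inarr, outarr)   # matching length/kind/precision: succeeds
try:
    pycbc.fft.fft(inarr, zeros(N // 2 + 2, dtype=numpy.complex128))
except ValueError:
    pass   # wrong output length is rejected, as the test above asserts
try:
    pycbc.fft.fft(inarr, numpy.zeros(N // 2 + 1, dtype=numpy.complex128))
except TypeError:
    pass   # a plain numpy array is not a PyCBC type, as the test above asserts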
if tlen < len(self):
    raise ValueError("The value of delta_f (%s) would be "
                     "undersampled. Maximum delta_f "
                     "is %s." % (delta_f, 1.0 / self.duration))
if not delta_f:
    tmp = self
else:
    tmp = TimeSeries(zeros(tlen, dtype=self.dtype),
                     delta_t=self.delta_t, epoch=self.start_time)
    tmp[:len(self)] = self[:]
f = FrequencySeries(zeros(flen,
                    dtype=complex_same_precision_as(self)),
                    delta_f=delta_f)
fft(tmp, f)
return f
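# For context, a sketch of the equivalent high-level call (assuming only that
# the pycbc package is installed): TimeSeries.to_frequencyseries wraps the
# zero-pad and forward-FFT pattern shown above; delta_f defaults to 1/duration.
import numpy
from pycbc.types import TimeSeries

ts = TimeSeries(numpy.random.randn(4096), delta_t=1.0/1024)   # 4 s of data
fs = ts.to_frequencyseries()
print(len(fs), fs.delta_f)   # 2049 samples at delta_f = 0.25 Hz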
hc = wfutils.taper_timeseries(hc, input_params['taper'],
                              return_lal=False)
# total duration of the waveform
tmplt_length = len(hp) * hp.delta_t
# for IMR templates the zero of time is at max amplitude (merger)
# thus the start time is minus the duration of the template from
# lower frequency cutoff to merger, i.e. minus the 'chirp time'
tChirp = - float( hp.start_time ) # conversion from LIGOTimeGPS
hp.resize(N)
hc.resize(N)
k_zero = int(hp.start_time / hp.delta_t)
hp.roll(k_zero)
hc.roll(k_zero)
hp_tilde = FrequencySeries(outplus, delta_f=delta_f, copy=False)
hc_tilde = FrequencySeries(outcross, delta_f=delta_f, copy=False)
fft(hp.astype(real_same_precision_as(hp_tilde)), hp_tilde)
fft(hc.astype(real_same_precision_as(hc_tilde)), hc_tilde)
hp_tilde.length_in_time = tmplt_length
hp_tilde.chirp_length = tChirp
hc_tilde.length_in_time = tmplt_length
hc_tilde.chirp_length = tChirp
return hp_tilde, hc_tilde
else:
    raise ValueError("Approximant %s not available" %
                     (input_params['approximant']))
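# A hypothetical numeric illustration of the time bookkeeping above (assumes
# only pycbc and numpy; the epoch, sample rate and length are made up): a
# time-domain template whose epoch is -4 s has its merger at t = 0, so the
# chirp time is 4 s, and rolling by k_zero wraps the array so that the t = 0
# sample sits at index 0 before the forward FFT.
import numpy
from pycbc.types import TimeSeries

hp = TimeSeries(numpy.zeros(128), delta_t=1.0/16, epoch=-4)
tChirp = -float(hp.start_time)              # 4.0 seconds
k_zero = int(hp.start_time / hp.delta_t)    # -64 samples
hp.roll(k_zero)                             # sample at t = 0 moves to index 0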
    series = scipy.signal.lfilter(coefficients, 1.0, timeseries)
    return series
else:
    cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype)
    cseries.resize(len(timeseries))
    cseries.roll(len(timeseries) - len(coefficients) + 1)
    timeseries = Array(timeseries, copy=False)

    flen = len(cseries) // 2 + 1
    ftype = complex_same_precision_as(timeseries)
    cfreq = zeros(flen, dtype=ftype)
    tfreq = zeros(flen, dtype=ftype)

    fft(Array(cseries), cfreq)
    fft(Array(timeseries), tfreq)

    cout = zeros(flen, ftype)
    out = zeros(len(timeseries), dtype=timeseries.dtype)

    correlate(cfreq, tfreq, cout)
    ifft(cout, out)
    return out.numpy() / len(out)
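# Sketch of why the result above is divided by len(out) (assumes only pycbc and
# numpy): for plain pycbc.types.Array data the transforms are unnormalized, so
# a forward FFT followed by an inverse FFT scales the data by N, just as the
# round-trip test near the top of this page multiplies its reference copy by
# len(inarr) for Array inputs.
import numpy
from pycbc.types import Array, zeros, complex_same_precision_as
from pycbc.fft import fft, ifft

N = 8
x = Array(numpy.random.randn(N))
xf = zeros(N // 2 + 1, dtype=complex_same_precision_as(x))
fft(x, xf)                    # real-to-complex forward transform
xr = zeros(N, dtype=x.dtype)
ifft(xf, xr)                  # unnormalized inverse: xr is N * x
print(numpy.allclose(xr.numpy(), N * x.numpy()))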
time_series = TimeSeries(zeros(old_N), delta_t=1.0/(series.delta_f*old_N),
                         dtype=real_same_precision_as(series))
ifft(series, time_series)
time_series.roll(-zeros_offset)
time_series.resize(new_N)
if side == 'left':
    time_series.roll(zeros_offset + new_N - old_N)
elif side == 'right':
    time_series.roll(zeros_offset)
out_series = FrequencySeries(zeros(new_n), epoch=series.epoch,
                             delta_f=delta_f, dtype=series.dtype)
fft(time_series, out_series)
return out_series
psd = self.psds[delta_f]
fseries /= psd.psdt
# trim ends of strain
if self.reduced_pad != 0:
    overwhite = TimeSeries(zeros(e-s, dtype=self.strain.dtype),
                           delta_t=self.strain.delta_t)
    pycbc.fft.ifft(fseries, overwhite)
    overwhite2 = overwhite[self.reduced_pad:len(overwhite)-self.reduced_pad]
    taper_window = self.trim_padding / 2.0 / overwhite.sample_rate
    gate_params = [(overwhite2.start_time, 0., taper_window),
                   (overwhite2.end_time, 0., taper_window)]
    gate_data(overwhite2, gate_params)
    fseries_trimmed = FrequencySeries(zeros(len(overwhite2) // 2 + 1,
                                      dtype=fseries.dtype), delta_f=delta_f)
    pycbc.fft.fft(overwhite2, fseries_trimmed)
    fseries_trimmed.start_time = fseries.start_time + self.reduced_pad * self.strain.delta_t
else:
    fseries_trimmed = fseries
fseries_trimmed.psd = psd
self.segments[delta_f] = fseries_trimmed
stilde = self.segments[delta_f]
return stilde
    if len(vectilde) < len(vec):
        cplen = len(vectilde)
    else:
        cplen = len(vec)
    vectilde[0:cplen] = vec[0:cplen]
    delta_f = vec.delta_f
if isinstance(vec, TimeSeries):
    vec_pad = TimeSeries(zeros(N), delta_t=vec.delta_t,
                         dtype=real_same_precision_as(vec))
    vec_pad[0:len(vec)] = vec
    delta_f = 1.0/(vec.delta_t*N)
    vectilde = FrequencySeries(zeros(n), delta_f=1.0,
                               dtype=complex_same_precision_as(vec))
    fft(vec_pad, vectilde)
vectilde = FrequencySeries(vectilde * DYN_RANGE_FAC, delta_f=delta_f, dtype=complex64)
return vectilde
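# For context (assumes only that pycbc is installed): DYN_RANGE_FAC used above
# is pycbc's dynamic-range scaling constant (2**69, about 5.9e20); scaling
# strain by it keeps typical amplitudes (~1e-21) in a comfortable range for the
# single-precision (complex64) frequency series returned here.
from pycbc import DYN_RANGE_FAC
print(DYN_RANGE_FAC)   # ~5.9029581e+20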