def get_x_lag(self, w, regimes_att):
    if regimes_att:
        # lag the original (pre-regimes) x, then rebuild the regimes structure
        xlag = ps.lag_spatial(w, regimes_att['x'])
        xlag = REGI.Regimes_Frame.__init__(self, xlag, regimes_att['regimes'],
                                           constant_regi=None,
                                           cols2regi=regimes_att['cols2regi'])[0]
        xlag = xlag.toarray()
    else:
        xlag = ps.lag_spatial(w, self.x)
    return xlag
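# --- Hedged usage sketch (not from the original source): how lag_spatial is
# typically called with the pre-2.0 PySAL API used in these snippets. The
# lattice weights and random data are assumptions for illustration only.
import numpy as np
import pysal as ps

w = ps.lat2W(5, 5)              # rook contiguity on a 5x5 lattice
w.transform = 'r'               # row-standardize: the lag is a neighbor average
x = np.random.random((25, 1))   # one variable observed on 25 cells
xlag = ps.lag_spatial(w, x)     # Wx, the spatially lagged x used throughout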
def __calc(self, z, op):
    if op == 'c':      # cross-product: sum_ij w_ij * z_i * z_j
        zl = pysal.lag_spatial(self.w, z)
        g = (z * zl).sum()
    elif op == 's':    # squared difference: sum_ij w_ij * (z_i - z_j)^2
        zs = np.zeros(z.shape)
        z2 = z ** 2
        for i, i0 in enumerate(self.w.id_order):
            neighbors = self.w.neighbor_offsets[i0]
            wijs = self.w.weights[i0]
            zw = zip(neighbors, wijs)
            zs[i] = sum([wij * (z2[i] - 2.0 * z[i] * z[j] + z2[j])
                         for j, wij in zw])
        g = zs.sum()
    elif op == 'a':    # absolute difference: sum_ij w_ij * |z_i - z_j|
        zs = np.zeros(z.shape)
        for i, i0 in enumerate(self.w.id_order):
            neighbors = self.w.neighbor_offsets[i0]
            wijs = self.w.weights[i0]
            zw = zip(neighbors, wijs)
            zs[i] = sum([wij * abs(z[i] - z[j]) for j, wij in zw])
        g = zs.sum()
    return g
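# --- Hedged illustration (made-up data, same pre-2.0 PySAL API): the 'c'
# branch above, applied to a binary variable, counts ordered joins between
# 1-valued neighbors.
import numpy as np
import pysal as ps

w = ps.lat2W(4, 4)          # binary rook weights on a 4x4 lattice
z = np.zeros(16)
z[:8] = 1.0                 # top half of the lattice is 1, bottom half 0
zl = ps.lag_spatial(w, z)   # neighbor sums under binary weights
g = (z * zl).sum()          # the Gamma cross-product statistic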
reg : regression object
      output instance from a regression model
gwk : PySAL weights object
      Spatial weights based on kernel functions

Returns
-------
psi : kxk array
      Robust estimation of the variance-covariance matrix
"""
if not constant:
    reg.hac_var = check_constant(reg.hac_var)
xu = spbroadcast(reg.hac_var, reg.u)    # scale row i of the X matrix by u_i
gwkxu = lag_spatial(gwk, xu)            # kernel-weighted sum of neighbors' Xu
psi0 = spdot(xu.T, gwkxu)               # (Xu)' K (Xu): HAC moment matrix
counter = 0
for m in reg.multi:
    reg.multi[m].robust = 'hac'
    reg.multi[m].name_gwk = reg.name_gwk
    try:    # IV-style output: sandwich with varb and zthhthi
        psi1 = spdot(reg.multi[m].varb, reg.multi[m].zthhthi)
        reg.multi[m].vm = spdot(psi1, np.dot(psi0, psi1.T))
    except AttributeError:    # OLS-style output: sandwich with (X'X)^-1
        reg.multi[m].vm = spdot(reg.multi[m].xtxi,
                                np.dot(psi0, reg.multi[m].xtxi))
    reg.vm[(counter * reg.kr):((counter + 1) * reg.kr),
           (counter * reg.kr):((counter + 1) * reg.kr)] = reg.multi[m].vm
    counter += 1
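# --- Hedged sketch (made-up inputs, not the original API): the core HAC
# moment matrix computed above is psi0 = (Xu)' K (Xu), with K a kernel
# weights matrix; a lattice W stands in for the kernel weights object here.
import numpy as np
import pysal as ps

n, k = 25, 3
X = np.random.random((n, k))       # plays the role of reg.hac_var
u = np.random.random((n, 1))       # plays the role of reg.u
gwk = ps.lat2W(5, 5)               # stand-in for kernel weights (gwk)
xu = X * u                         # spbroadcast: row i of X scaled by u_i
gwkxu = ps.lag_spatial(gwk, xu)    # K (Xu)
psi0 = np.dot(xu.T, gwkxu)         # k x k meat of the sandwich estimator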
def _calc(self, y, w, classes, k):
    # classify the spatial lag of y, then count joint transitions
    ly = pysal.lag_spatial(w, y)
    npa = np.array
    if self.fixed:
        # pool all periods into one quantile classification
        l_classes = pysal.Quantiles(ly.flatten(), k=k).yb
        l_classes.shape = ly.shape
    else:
        # classify each period separately
        l_classes = npa([pysal.Quantiles(ly[:, i], k=k).yb
                         for i in np.arange(self.cols)])
        l_classes = l_classes.transpose()
    # T[a, b, c]: count of units in lag class a moving from class b to c
    T = np.zeros((k, k, k))
    n, t = y.shape
    for t1 in range(t - 1):
        t2 = t1 + 1
        for i in range(n):
            T[l_classes[i, t1], classes[i, t1], classes[i, t2]] += 1
    P = np.zeros_like(T)
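# --- Hedged sketch (made-up counts): the count cube above is indexed
# T[lag_class, class_t, class_t+1]; P presumably holds its row-normalized
# conditional transition probabilities, e.g.:
import numpy as np

k = 3
T = np.random.randint(0, 10, size=(k, k, k)).astype(float)
P = np.zeros_like(T)
for a in range(k):
    row_sums = T[a].sum(axis=1)
    row_sums[row_sums == 0] = 1.0    # leave empty rows as zeros
    P[a] = T[a] / row_sums[:, None]  # rows of each k x k slice sum to 1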
def ML_Error(y, w, X, precrit=0.0000001, verbose=False, like='full'):
    n = w.n
    # cross-product moments cached once and reused by the concentrated likelihood
    yy = (y ** 2).sum()
    yl = ps.lag_spatial(w, y)
    Xy = np.dot(X.T, y)
    Xl = ps.lag_spatial(w, X)
    Xly = np.dot(Xl.T, y) + np.dot(X.T, yl)
    Xlyl = np.dot(Xl.T, yl)
    XX = np.dot(X.T, X)
    XlX = np.dot(Xl.T, X) + np.dot(X.T, Xl)
    XlXl = np.dot(Xl.T, Xl)
    yly = np.dot(yl.T, y)
    yyl = np.dot(y.T, yl)
    ylyl = np.dot(yl.T, yl)
    lam = 0
    dlik, b, sig2, tr, dd = defer(w, lam, yy, yyl, ylyl, Xy, Xly, Xlyl,
                                  XX, XlX, XlXl)
    #roots = SPARSE.linalg.eigsh(symmetrize(w))[0]
    #maxroot = 1. / roots.max()
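# --- Hedged sketch (illustrative, not the original defer): the cached
# cross-products let b(lam) for the filtered regression be formed without
# relagging, since Xs'Xs = XX - lam*XlX + lam^2*XlXl and
# Xs'ys = Xy - lam*Xly + lam^2*Xlyl under the definitions above.
import numpy as np

def gls_b(lam, XX, XlX, XlXl, Xy, Xly, Xlyl):
    xsxs = XX - lam * XlX + lam ** 2 * XlXl
    xsys = Xy - lam * Xly + lam ** 2 * Xlyl
    return np.linalg.solve(xsxs, xsys)   # GLS betas at this lambda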
import copy

import numpy as np
import pysal
from numpy.linalg import norm


def inverse_scg(w, data, scalar, transpose=False, symmetric=False,
                threshold=0.00001, max_iterations=None):
    # conjugate-gradient style solve of (I - scalar*W) x = data using only
    # spatial lags, avoiding the dense n x n inverse
    #multiplier = SP.identity(w.n) - (scalar*w.sparse)     # A      n x n
    count = 0                                              # k      scalar (step 1)
    run_tot = copy.copy(data)                              # z_k    n x 1  (step 1)
    #residuals = data - run_tot * multiplier               # r_k    n x 1  (step 2)
    residuals = data - pysal.lag_spatial(w, scalar * data)
    #test1 = la.norm(residuals)                            # G_k    scalar (step 3)
    test1 = norm(residuals)
    directions = copy.copy(residuals)                      # d_k    n x 1  (step 6)
    while test1 > threshold:                               # (step 4)
        count += 1                                         # (step 5)
        #changes = multiplier * directions                 # t      n x 1  (step 7)
        changes = directions - pysal.lag_spatial(w, scalar * directions)
        intensity = test1 / (np.dot(directions.T, changes))   # alpha  (step 8)
        #int_dir = intensity * directions                  # (step 8)
        run_tot += intensity * directions
        #run_tot += int_dir                                # (step 8)
        #residuals -= int_dir                              # (step 8)
        residuals -= intensity * changes
        #test2 = la.norm(residuals)                        # (step 3)
        test2 = norm(residuals)
        directions = residuals + ((test2 / test1) * directions)   # (step 6)
        test1 = test2              # update the convergence check (step 3)
    return run_tot
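# --- Hedged usage sketch (illustrative only): inverse_scg is intended to
# approximate x = (I - scalar*W)^-1 b via repeated spatial lags rather than a
# dense inverse; whether it converges depends on scalar and the weights.
import numpy as np
import pysal

w = pysal.lat2W(10, 10)
w.transform = 'r'
b = np.random.random((w.n, 1))
x = inverse_scg(w, b, 0.5)
# residual check: b - (I - 0.5*W) x should be near zero on success
print(np.abs(b - (x - pysal.lag_spatial(w, 0.5 * x))).max())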
## prep time data
t_data = get_time_data(query_result, time_cols)
plpy.debug('shape of t_data %d, %d' % t_data.shape)
plpy.debug('shape of weights.sparse: %d, %d' % weights.sparse.shape)
plpy.debug('first num elements: %f' % t_data[0, 0])
sp_markov_result = ps.Spatial_Markov(t_data,
                                     weights,
                                     k=num_classes,
                                     fixed=False,
                                     permutations=permutations)
## get lag classes
lag_classes = ps.Quantiles(
    ps.lag_spatial(weights, t_data[:, -1]),
    k=num_classes).yb
## look up the probability distribution for each unit according to its class and lag class
prob_dist = get_prob_dist(sp_markov_result.P,
                          lag_classes,
                          sp_markov_result.classes[:, -1])
## find the ups and downs and the overall distribution of each cell
trend_up, trend_down, trend, volatility = get_prob_stats(prob_dist,
                                                         sp_markov_result.classes[:, -1])
## output the results
return zip(trend, trend_up, trend_down, volatility, weights.id_order)
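# --- Hedged sketch (not the original helper): the lookup get_prob_dist
# performs amounts to selecting, for each unit, the transition-probability
# row matching its (lag class, current class) pair:
import numpy as np

def prob_dist_sketch(P, lag_classes, classes):
    # P[a, b, :] is the distribution over next-period classes given
    # lag class a and current class b
    return np.array([P[lag_classes[i], classes[i]]
                     for i in range(len(classes))])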
method : string
         method to use for evaluating the Jacobian term in the
         concentrated likelihood function (FULL|ORD), where
         FULL = brute force and ORD = eigenvalue-based Jacobian

Returns
-------
results : dictionary with estimates, standard errors, vcv, and z-values
"""
# step 1: OLS of y on X yields b1
d = np.linalg.inv(np.dot(X.T, X))
b1 = np.dot(d, np.dot(X.T, y))
# step 2: OLS of Wy on X yields b2
wy = ps.lag_spatial(w, y)
b2 = np.dot(d, np.dot(X.T, wy))
# step 3: compute residuals e1, e2
e1 = y - np.dot(X, b1)
e2 = wy - np.dot(X, b2)
# step 4: given e1, e2, find the rho that maximizes Lc
# OLS estimate of rho as a starting value
XA = np.hstack((wy, X))
bols = np.dot(np.linalg.inv(np.dot(XA.T, XA)), np.dot(XA.T, y))
rols = bols[0][0]
# keep the starting value inside the stationary interval
while np.abs(rols) > 1.0:
    rols = rols / 2.0
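# --- Hedged sketch (not the original code): the concentrated log-likelihood
# Lc(rho) that step 4 maximizes, built from e1 and e2 above with a
# brute-force log-Jacobian (the FULL option in the docstring); constants
# dropped, and w.full()[0] is the dense form of the weights.
import numpy as np

def concentrated_loglik(rho, e1, e2, w):
    n = w.n
    e = e1 - rho * e2                  # residuals at this rho
    sig2 = (e ** 2).sum() / n          # concentrated sigma^2
    sign, logdet = np.linalg.slogdet(np.identity(n) - rho * w.full()[0])
    return -0.5 * n * np.log(sig2) + logdet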
# GLS on the spatially filtered variables at the current lambda
ys = self.y - self.lam * ylag
xs = self.x - self.lam * xlag
xsxs = np.dot(xs.T, xs)
xsxsi = np.linalg.inv(xsxs)
xsys = np.dot(xs.T, ys)
b = np.dot(xsxsi, xsys)
self.betas = np.vstack((b, self.lam))
self.u = y - np.dot(self.x, b)
self.predy = self.y - self.u
# residual variance from the filtered residuals
self.e_filtered = self.u - self.lam * ps.lag_spatial(w, self.u)
self.sig2 = np.dot(self.e_filtered.T, self.e_filtered) / self.n
# variance-covariance matrix for betas
varb = self.sig2 * xsxsi
# variance-covariance matrix for lambda, sigma: traces of W(I - lam*W)^-1
a = -self.lam * W
spfill_diagonal(a, 1.0)      # a = I - lam*W (W is the matrix form of w)
ai = spinv(a)
wai = spdot(W, ai)           # W (I - lam*W)^-1
tr1 = wai.diagonal().sum()
wai2 = spdot(wai, wai)
tr2 = wai2.diagonal().sum()
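# --- Hedged continuation sketch (an assumption, not the original code): with
# tr1, tr2 and a third trace tr3 = tr((W A^-1)' (W A^-1)), the (lambda,
# sigma2) block of the information matrix is commonly assembled as below and
# inverted to get the asymptotic variances.
import numpy as np

def lam_sig2_vm(tr1, tr2, tr3, sig2, n):
    info = np.array([[tr2 + tr3,  tr1 / sig2],
                     [tr1 / sig2, n / (2.0 * sig2 ** 2)]])
    return np.linalg.inv(info)   # asymptotic vcov of (lambda, sigma^2)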