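# Demo: numerical gradients and Hessians on a linear least-squares problem.
# fun1/fun2 are not part of this excerpt; the definitions below are assumed
# from the checks that follow (fun1: per-observation squared residuals,
# fun2: their sum, whose analytic Hessian is exactly 2 * x.T @ x).
import numpy as np
from numdifftools import Gradient, Hessian  # assumed home of these classes


def fun1(beta, y, x):
    # Squared residual of each observation of the linear model y = x @ beta.
    return (y - np.dot(x, beta)) ** 2


def fun2(beta, y, x):
    # Sum-of-squares objective; gradient 2*x.T@(x@beta - y), Hessian 2*x.T@x.
    return fun1(beta, y, x).sum(0)
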
nobs = 200
x = np.random.randn(nobs, 3)
# xk = np.array([1, 2, 3])
xk = np.array([1., 1., 1.])
# xk = np.zeros(3)
beta = xk
# Simulate y from the linear model, then recover the least-squares
# solution with the pseudo-inverse.
y = np.dot(x, beta) + 0.1 * np.random.randn(nobs)
xk = np.dot(np.linalg.pinv(x), y)
epsilon = 1e-6
args = (y, x)

from scipy import optimize

# Minimize fun2 numerically as an independent check on xk.
_xfmin = optimize.fmin(fun2, (0, 0, 0), args)
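# fmin (Nelder-Mead) should land on the same least-squares solution as the
# pseudo-inverse; with default tolerances expect agreement to roughly 1e-4.
print(_xfmin - xk)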
# print(approx_fprime((1, 2, 3), fun, steps, x))
# Forward differences with steps +epsilon and -epsilon (the latter is a
# backward difference); each row of jac holds the gradient of one
# squared residual.
jac = Gradient(fun1, epsilon, method='forward')(xk, *args)
jacmin = Gradient(fun1, -epsilon, method='forward')(xk, *args)
# print(jac)
# The column sums are the gradient of fun2, which vanishes at the
# least-squares solution.
print(jac.sum(0))
print('\nnp.dot(jac.T, jac)')
print(np.dot(jac.T, jac))
print('\n2*np.dot(x.T, x)')
print(2 * np.dot(x.T, x))
# Averaging the forward and backward results cancels the O(epsilon) error.
jac2 = (jac + jacmin) / 2.
print(np.dot(jac2.T, jac2))
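# Why averaging helps, in one scalar example (sketch): for f(t) = t**3 at
# t = 2 the forward and backward O(h) errors have opposite signs, so the
# mean recovers central-difference O(h**2) accuracy.
fwd = ((2 + epsilon) ** 3 - 2 ** 3) / epsilon   # ~12.000006
bwd = (2 ** 3 - (2 - epsilon) ** 3) / epsilon   # ~11.999994
print((fwd + bwd) / 2)                          # ~12.0 + O(epsilon**2)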
# he = approx_hess(xk, fun2, steps, *args)
# The objective is quadratic, so the exact Hessian is 2 * x.T @ x and the
# difference printed last should be near zero.
print(Hessian(fun2, 1e-3, method='central2')(xk, *args))
he = Hessian(fun2, method='central2')(xk, *args)
print('hessfd')
print(he)
# step=None lets numdifftools choose the base step automatically.
print('base_step =', None)
print(he - 2 * np.dot(x.T, x))
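
# Sanity check (sketch): central differences on a quadratic objective
# should reproduce the analytic Hessian to well within this tolerance.
assert np.allclose(he, 2 * np.dot(x.T, x), rtol=1e-4, atol=1e-4)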
class Gradient(Derivative):
    # Base class assumed to be numdifftools' Derivative; n=1 in the
    # super() call selects the first derivative.

    def __init__(self, f, step=None, method='central', order=2,
                 full_output=False):
        super(Gradient, self).__init__(f, step=step, method=method, n=1,
                                       order=order, full_output=full_output)
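    # Usage sketch (doctest style; assumes numpy as np is in scope):
    # >>> g = Gradient(lambda z: np.sum(z ** 2))
    # >>> np.allclose(g(np.array([1., 1.])), [2., 2.])
    # True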
    @staticmethod
    def _complex_odd(f, fx, x, h, *args, **kwds):
        # Method name and increment setup assumed by analogy with
        # _multicomplex below; steps are taken along each coordinate with
        # an increment proportional to sqrt(1j).
        n = len(x)
        increments = np.identity(n) * _SQRT_J * h
        partials = [((_SQRT_J / 2.) * (f(x + ih, *args, **kwds) -
                                       f(x - ih, *args, **kwds))).imag
                    for ih in increments]
        return np.array(partials).T

    @staticmethod
    def _multicomplex(f, fx, x, h, *args, **kwds):
        # Multicomplex (bicomplex) step: the derivative is read off the
        # imaginary part, so there is no subtractive cancellation.
        n = len(x)
        increments = np.identity(n) * 1j * h
        partials = [f(bicomplex(x + hi, 0), *args, **kwds).imag
                    for hi in increments]
        return np.array(partials).T
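    # Why complex steps work (illustrative sketch): for analytic f,
    # imag(f(x + 1j*h)) / h matches f'(x) to O(h**2) with no subtractive
    # cancellation, so h can be made tiny:
    # >>> np.imag(np.exp(1.0 + 1e-20j)) / 1e-20   # f = exp, f'(1) = e
    # 2.718281828459045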
class Jacobian(Gradient):
__doc__ = _cmn_doc % dict(
derivative='Jacobian',
extra_parameter="""order : int, optional
defines the order of the error term in the Taylor approximation used.
For 'central' and 'complex' methods, it must be an even number.""",
returns="""
Returns
-------
jacob : array
Jacobian
""", extra_note="""
Higher order approximation methods will generally be more accurate, but may
also suffer more from numerical problems. First-order methods are usually
not recommended.
If f returns a 1d array, it returns a Jacobian. If a 2d array is returned
by f (e.g., with a value for each observation), it returns a 3d array with
the Jacobian of each observation with shape xk x nobs x xk. I.e., the
Jacobian of the first observation would be [:, 0, :]