How to use the numexpr.evaluate function in numexpr

To help you get started, we’ve selected a few numexpr examples, based on popular ways it is used in public projects.
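
Before the project examples, here is a minimal sketch of the basic pattern (illustrative variable names, not taken from any project below): numexpr compiles the expression string to fast vector bytecode and, by default, resolves the names it contains from the calling frame.

import numpy as np
import numexpr as ne

a = np.arange(1_000_000, dtype='float64')
b = np.random.rand(1_000_000)

# names in the expression are resolved from this frame
result = ne.evaluate('2*a + 3*b')

assert np.allclose(result, 2*a + 3*b)  # matches the plain NumPy result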

github pydata / numexpr / bench / issue-36.py
import numpy as np
import numexpr as ne
from time import time

def bench(N):
    print("*** array length:", N)
    a = np.arange(N)
    t0 = time()
    ntimes = (1000 * 2**15) // N
    for i in range(ntimes):
        ne.evaluate('a>1000')
    print("numexpr--> %.3g" % ((time() - t0) / ntimes,))

    t0 = time()
    for i in range(ntimes):
        eval('a>1000')  # plain Python eval of the same NumPy comparison
    print("numpy--> %.3g" % ((time() - t0) / ntimes,))
github skuschel / postpic / postpic / particles / scalarproperties.py
import warnings

try:
    from collections.abc import Mapping
except ImportError:
    from collections import Mapping

import numexpr as ne

__all__ = ['ScalarProperty']


class ScalarProperty(object):

    _evaluate = staticmethod(ne.evaluate)

    def __init__(self, expr, name=None, unit=None, symbol=None):
        '''
        Represents a scalar particle property.

        expr - the expression (string) used to calculate the value.
        name - the name under which the property can be accessed.
        unit - the unit of the property.
        symbol - the symbol used in formulas. Defaults to 'name' if omitted.
        '''
        self._expr = expr
        self._name = name
        self._unit = unit
        self._symbol = symbol
        self._func_cache = None  # Optimized numexpr function if available
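
postpic's own evaluation machinery is not part of this excerpt, but since _evaluate is just a staticmethod wrapping ne.evaluate, a hedged sketch of how such a property could be evaluated looks like this (the expression, particle names, and local_dict binding below are illustrative assumptions):

import numpy as np

sp = ScalarProperty('sqrt(px**2 + py**2)', name='p_perp')

# hypothetical particle data; local_dict binds the names in the expression
data = {'px': np.random.rand(1000), 'py': np.random.rand(1000)}
values = sp._evaluate(sp._expr, local_dict=data)
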
github robintw / PyFMask / PyFMask / fmask.py
    # saturation in the three visible bands
    satu_Bv = numexpr.evaluate("(satu_B1 | satu_B2 | satu_B3)")
    del satu_B1
    # Basic cloud test
    idplcd = numexpr.evaluate(
        "(NDSI < 0.8) & (NDVI < 0.8) & (data6 > 300) & (Temp < 2700)")

    # Snow test
    # It flags every snow pixel, including snow pixels under thin or icy
    # clouds
    Snow[numexpr.evaluate(
        "(NDSI > 0.15) & (Temp < 1000) & (data4 > 1100) & (data2 > 1000)")] = 1
    #Snow[mask == 0] = 255
    # Water test
    # Zhe's water test (works over thin cloud)
    WT[numexpr.evaluate(
        "((NDVI < 0.01) & (data4 < 1100)) | ((NDVI < 0.1) & (NDVI > 0) & (data4 < 500))")] = 1
    WT[mask == 0] = 255
    # ################################################ Whiteness test
    # visible band flatness (sum(abs)/mean < 0.6 => bright and dark cloud)
    visimean = numexpr.evaluate("(data1 + data2 + data3) / 3 ")
    whiteness = numexpr.evaluate(
        "(abs(data1 - visimean) + abs(data2 - visimean)+ abs(data3 - visimean)) / visimean")
    del visimean

    # update idplcd
    whiteness[satu_Bv] = 0  # If one visible is saturated whiteness == 0
    idplcd &= whiteness < 0.7

    # Haze test
    HOT = numexpr.evaluate("data1 - 0.5 * data3 - 800")  # Haze test
    idplcd &= numexpr.evaluate("(HOT > 0) | satu_Bv")
github GeoscienceAustralia / agdc / stats / temporal_stats_numexpr_module.py
        stats = numpy.zeros((output_band_count, rows, cols), dtype='float32')

        stats[0] = numpy.nansum(array_bip, axis=2) # sum
        vld_obsv = numpy.sum(numpy.isfinite(array_bip), axis=2) # we need an int
        stats[2] = vld_obsv
        stats[1] = numexpr.evaluate("fsum / fvld_obsv", {'fsum':stats[0], 'fvld_obsv':stats[2]}) # mean


        residuals = numexpr.evaluate("array - mean", {'mean':stats[1], 'array':array})
        # variance
        t_emp = numexpr.evaluate("residuals**2").transpose(1,2,0)
        nan_sum = numpy.nansum(t_emp, axis=2)
        stats[3] = numexpr.evaluate("nan_sum / (fvld_obsv - 1)", {'fvld_obsv':stats[2], 'nan_sum':nan_sum})
        stats[4] = numexpr.evaluate("sqrt(var)", {'var':stats[3]}) # stddev
        # skewness
        t_emp = numexpr.evaluate("(residuals / stddev)**3", {'stddev':stats[4], 'residuals':residuals}).transpose(1,2,0)
        nan_sum = numpy.nansum(t_emp, axis=2)
        stats[5] = numexpr.evaluate("nan_sum / fvld_obsv", {'fvld_obsv':stats[2], 'nan_sum':nan_sum})
        # kurtosis
        t_emp = numexpr.evaluate("(residuals / stddev)**4", {'stddev':stats[4], 'residuals':residuals}).transpose(1,2,0)
        nan_sum = numpy.nansum(t_emp, axis=2)
        stats[6] = numexpr.evaluate("(nan_sum / fvld_obsv) - 3", {'fvld_obsv':stats[2], 'nan_sum':nan_sum})

        stats[7] = numpy.nanmax(array_bip, axis=2) # max
        stats[8] = numpy.nanmin(array_bip, axis=2) # min


        # 2nd quantile (aka median)
        # Don't want any interpolated values, so can't use the standard
        # numpy.median() function
        # For odd length arrays, the middle value is taken, for even length
        # arrays, the first of the two middle values is taken
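
This module passes a dict as the second positional argument to numexpr.evaluate; that argument is local_dict, which binds the names appearing in the expression explicitly instead of looking them up in the calling frame. A minimal sketch of the same binding style (illustrative arrays):

import numpy as np
import numexpr as ne

total = np.random.rand(100, 100).astype('float32')
count = np.random.randint(1, 10, size=(100, 100)).astype('float32')

# 'fsum' and 'fvld_obsv' exist only inside the expression's local_dict
mean = ne.evaluate('fsum / fvld_obsv', {'fsum': total, 'fvld_obsv': count})
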
github bnpy / bnpy / bnpy / util / NumericUtil.py
def calcRlogRdotv_specificpairs_numexpr(R, v, mPairs):
    K = R.shape[1]
    m_Hresp = np.zeros(len(mPairs))
    if K == 1:
        return m_Hresp
    for m, (kA, kB) in enumerate(mPairs):
        curR = R[:, kA] + R[:, kB]
        ne.evaluate("curR * log(curR)", out=curR)
        m_Hresp[m] = np.dot(v, curR)
    return m_Hresp
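
The out= keyword used here tells numexpr to write the result into an existing array, so curR is updated in place without allocating a temporary for the output. The same idea in isolation (illustrative array):

import numpy as np
import numexpr as ne

x = np.random.rand(1_000_000) + 0.5

# result is written back into x; no output temporary is allocated
ne.evaluate('x * log(x)', out=x)
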
github danielhrisca / asammdf / asammdf / mdf2.py
                    elif P1 == 0:
                        vals = func((P3 / (vals - P7) - P6) / P4) / P5
                    else:
                        message = 'wrong conversion {}'.format(conversion_type)
                        raise ValueError(message)

                elif conversion_type == v2c.CONVERSION_TYPE_RAT:
                    P1 = conversion['P1']
                    P2 = conversion['P2']
                    P3 = conversion['P3']
                    P4 = conversion['P4']
                    P5 = conversion['P5']
                    P6 = conversion['P6']
                    if (P1, P2, P3, P4, P5, P6) != (0, 1, 0, 0, 0, 1):
                        X = vals
                        vals = evaluate(v2c.RAT_CONV_TEXT)

                elif conversion_type == v2c.CONVERSION_TYPE_POLY:
                    P1 = conversion['P1']
                    P2 = conversion['P2']
                    P3 = conversion['P3']
                    P4 = conversion['P4']
                    P5 = conversion['P5']
                    P6 = conversion['P6']

                    X = vals

                    coefs = (P2, P3, P5, P6)
                    if coefs == (0, 0, 0, 0):
                        if P1 != P4:
                            vals = evaluate(v2c.POLY_CONV_SHORT_TEXT)
                    else:
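
The formulas here (v2c.RAT_CONV_TEXT, v2c.POLY_CONV_SHORT_TEXT) are stored as expression strings and evaluated with X and P1..P6 resolved from the enclosing frame; their exact text is not part of this excerpt. A hedged sketch of the pattern, with an illustrative rational-conversion string standing in for the real constant:

import numpy as np
from numexpr import evaluate

# illustrative stand-in; the real v2c.RAT_CONV_TEXT is not shown above
RAT_CONV_TEXT = '(P1 * X**2 + P2 * X + P3) / (P4 * X**2 + P5 * X + P6)'

P1, P2, P3, P4, P5, P6 = 0.0, 2.0, 1.0, 0.0, 0.0, 1.0
X = np.arange(10, dtype='float64')
vals = evaluate(RAT_CONV_TEXT)  # P1..P6 and X picked up from this frame
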
github skuschel / postpic / postpic / helper.py
    kwargs['nsteps'] = len(complex_field.axes[axis])
    kwargs['move_window'] = False
    kwargs['yield_zeroth_step'] = True
    gen = kspace_propagate(kspace, dt, **kwargs)

    # initialize an empty matrix
    newmat = np.empty_like(kspace.matrix)
    slices = [slice(None)] * kspace.dimensions
    expr = 'sum(km, {})'.format(axis)
    for i, kspace_prop in enumerate(gen):
        # fill the new matrix line by line by calculating the 0-component of the inverse
        # transform after each propagation step
        slices[axis] = i
        km = kspace_prop.matrix
        # newmat[slices] = np.sum(kspace_prop.matrix, axis=axis)
        newmat[tuple(slices)] = ne.evaluate(expr)  # tuple index; list-of-slices indexing is deprecated in NumPy

    k_transverse_tprofile = kspace.replace_data(newmat)
    t_axis = datahandling.Axis(name='t', unit='s',
                               grid=np.linspace(t_input + initial_dt,
                                                t_input + initial_dt + (N-1)*dt,
                                                N))
    k_transverse_tprofile.setaxisobj(axis, t_axis)
    k_transverse_tprofile.axes_transform_state[axis] = False
    k_transverse_tprofile.transformed_axes_origins[axis] = None

    if do_fft:
        return k_transverse_tprofile.fft(otheraxes)
    else:
        return k_transverse_tprofile
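
The expression built here, 'sum(km, {axis})', relies on numexpr's built-in reductions: sum() and prod() can be applied inside the expression, with the reduction axis as the second argument. In isolation (illustrative array):

import numpy as np
import numexpr as ne

km = np.random.rand(8, 16, 32)

reduced = ne.evaluate('sum(km, 0)')  # reduce over axis 0 inside numexpr
assert np.allclose(reduced, km.sum(axis=0))
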
github jcmgray / quimb / quimb / accel.py
def kron_dense_big(a, b):
    """Parallelized (using numexpr) tensor (Kronecker) product for two
    dense arrays.
    """
    a, b, mp, nq = reshape_for_kron(a, b)
    return evaluate('a * b').reshape((mp, nq))
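
reshape_for_kron is quimb's own helper and is not shown here; the idea is that it expands a and b so that broadcasting 'a * b' yields the Kronecker product, which numexpr then evaluates in parallel. A hedged standalone sketch of that trick (hypothetical helper, 2-D inputs assumed):

import numpy as np
import numexpr as ne

def kron_via_broadcast(a, b):
    # hypothetical stand-in for reshape_for_kron: insert singleton axes so
    # broadcasting 'a4 * b4' produces every pairwise product
    m, p = a.shape
    n, q = b.shape
    a4 = a.reshape(m, 1, p, 1)
    b4 = b.reshape(1, n, 1, q)
    return ne.evaluate('a4 * b4').reshape(m * n, p * q)

a, b = np.random.rand(3, 4), np.random.rand(5, 6)
assert np.allclose(kron_via_broadcast(a, b), np.kron(a, b))
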
github robintw / PyFMask / PyFMask / fmask.py
            # Retrieve the projection and geotransform info from the blue band
            # (B1 for Landsat 4, 5, 7)
            geoT, prj, sz, ul_coord = im_info(n_B1)

            # find pixels that are saturated in the visible bands
            B1Satu = im_B1 == 255.0
            B2Satu = im_B2 == 255.0
            B3Satu = im_B3 == 255.0

            # only process pixels where all bands have values (id_missing)
            id_missing = numexpr.evaluate(
                "(im_B1 == 0.0) | (im_B2 == 0.0) | (im_B3 == 0.0) | (im_B4 == 0.0) | (im_B5 == 0.0) | (im_B6 == 0.0) | (im_B7 == 0.0)")

            # DN (digital number) to radiance first
            im_B1 = numexpr.evaluate("((Lma - Lmi) / (Qma - Qmi)) * (im_B1 - Qmi) + Lmi", {
                                     'Lma': Lmax[0], 'Lmi': Lmin[0], 'Qma': Qcalmax[0], 'Qmi': Qcalmin[0]}, locals())
            im_B2 = numexpr.evaluate("((Lma - Lmi) / (Qma - Qmi)) * (im_B2 - Qmi) + Lmi", {
                                     'Lma': Lmax[1], 'Lmi': Lmin[1], 'Qma': Qcalmax[1], 'Qmi': Qcalmin[1]}, locals())
            im_B3 = numexpr.evaluate("((Lma - Lmi) / (Qma - Qmi)) * (im_B3 - Qmi) + Lmi", {
                                     'Lma': Lmax[2], 'Lmi': Lmin[2], 'Qma': Qcalmax[2], 'Qmi': Qcalmin[2]}, locals())
            im_B4 = numexpr.evaluate("((Lma - Lmi) / (Qma - Qmi)) * (im_B4 - Qmi) + Lmi", {
                                     'Lma': Lmax[3], 'Lmi': Lmin[3], 'Qma': Qcalmax[3], 'Qmi': Qcalmin[3]}, locals())
            im_B5 = numexpr.evaluate("((Lma - Lmi) / (Qma - Qmi)) * (im_B5 - Qmi) + Lmi", {
                                     'Lma': Lmax[4], 'Lmi': Lmin[4], 'Qma': Qcalmax[4], 'Qmi': Qcalmin[4]}, locals())
            im_B6 = numexpr.evaluate("((Lma - Lmi) / (Qma - Qmi)) * (im_B6 - Qmi) + Lmi", {
                                     'Lma': Lmax[5], 'Lmi': Lmin[5], 'Qma': Qcalmax[5], 'Qmi': Qcalmin[5]}, locals())
            im_B7 = numexpr.evaluate("((Lma - Lmi) / (Qma - Qmi)) * (im_B7 - Qmi) + Lmi", {
                                     'Lma': Lmax[6], 'Lmi': Lmin[6], 'Qma': Qcalmax[6], 'Qmi': Qcalmin[6]}, locals())

            # radiance to TOA reflectances
            # fprintf('From Radiances to TOA ref')
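
Each call above mixes two binding mechanisms: the scalar calibration constants go in as the second positional argument (local_dict) while the image arrays are found through the third (global_dict), here locals(). A minimal sketch of that mixed binding with illustrative constants:

import numpy as np
import numexpr as ne

im = np.random.randint(1, 255, size=(100, 100)).astype('float64')

# scalars via local_dict, 'im' via global_dict (this frame's locals)
rad = ne.evaluate('((Lma - Lmi) / (Qma - Qmi)) * (im - Qmi) + Lmi',
                  {'Lma': 193.0, 'Lmi': -1.52, 'Qma': 255.0, 'Qmi': 1.0},
                  locals())
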
github GeoscienceAustralia / agdc / stats / temporal_stats_numexpr_module.py
        # 2nd quantile (aka median)
        # Don't want any interpolated values, so can't use the standard
        # numpy.median() function
        # For odd length arrays, the middle value is taken, for even length
        # arrays, the first of the two middle values is taken

        flat_sort = numpy.sort(array, axis=0).flatten()

        # q2_offset
        q2_idx = numexpr.evaluate("((vld_obsv / 2) + ((vld_obsv % 2) - 1))") # median locations

        # array offsets
        a_off = numpy.arange(cols*rows).reshape((rows,cols))

        # turn the locations into a useable 1D index
        idx_1D = numexpr.evaluate("(q2_idx * cols * rows + a_off)").astype('int')
        stats[9] = flat_sort[idx_1D] # get median values

        # now to find the original index of the median value
        sort_orig_idx = numpy.argsort(array, axis=0).flatten()
        stats[10]  = sort_orig_idx[idx_1D]

        # 1st quantile
        length = numexpr.evaluate("q2_idx + 1")
        q1_idx  = numexpr.evaluate("((length / 2) + ((length % 2) - 1))")
        idx_1D = numexpr.evaluate("(q1_idx * cols * rows + a_off)").astype('int') # 1D index
        stats[11] = flat_sort[idx_1D] # get 1st quantile


        # 3rd quantile
        idx_1D = numexpr.evaluate("((q2_idx + q1_idx) * cols * rows + a_off)").astype('int') # 1D index
        stats[12] = flat_sort[idx_1D] # get 3rd quantile
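
As in the earlier excerpt from this module, the index arithmetic itself runs through numexpr: scalars such as cols and rows are picked up from the frame and broadcast against the per-pixel offset array, and the float result is cast back to int for flat indexing. A compact sketch of the median-index computation on toy data:

import numpy as np
import numexpr as ne

rows, cols = 4, 5
vld_obsv = np.random.randint(1, 9, size=(rows, cols)).astype('float64')

q2_idx = ne.evaluate('(vld_obsv / 2) + ((vld_obsv % 2) - 1)')
a_off = np.arange(cols * rows).reshape((rows, cols))
idx_1D = ne.evaluate('q2_idx * cols * rows + a_off').astype('int')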