How to use the numpy.ones function in NumPy

To help you get started, we’ve selected a few numpy.ones examples, based on popular ways it is used in public projects.
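
numpy.ones(shape, dtype=float) returns a new array of the given shape and dtype, filled with ones. A minimal sketch before the real-world examples:

import numpy as np

np.ones(3)                       # array([1., 1., 1.])
np.ones((2, 3), dtype=np.uint8)  # 2x3 integer array of ones
np.ones([180, 360, 6]).shape     # (180, 360, 6), as in the first example below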

github CCI-Tools / cate / tests / ops / test_subset.py (View on GitHub)
def test_generic_masked_inverted(self):
        """
        Test using a generic Polygon and masking
        """
        # Africa
        a = str('POLYGON((-10.8984375 35.60371874069731,-19.16015625 '
                '23.885837699861995,-20.56640625 17.14079039331665,-18.6328125 '
                '7.536764322084079,-10.72265625 0.7031073524364783,10.37109375 '
                '0.3515602939922709,10.37109375 -22.268764039073965,22.8515625 '
                '-42.29356419217007,37.79296875 -27.21555620902968,49.39453125 '
                '-3.5134210456400323,54.4921875 14.093957177836236,18.984375 '
                '35.88905007936091,-10.8984375 35.60371874069731))')

        # Inverted lat
        dataset = xr.Dataset({
            'first': (['lat', 'lon', 'time'], np.ones([180, 360, 6])),
            'second': (['lat', 'lon', 'time'], np.ones([180, 360, 6])),
            'lat': np.linspace(89.5, -89.5, 180),
            'lon': np.linspace(-179.5, 179.5, 360)})
        actual = subset.subset_spatial(dataset, a)
        # Gulf of Guinea
        gog = actual.sel(method='nearest', **{'lon': 1.2, 'lat': -1.4})
        self.assertTrue(np.isnan(gog['first']).all())
        # Africa
        self.assertTrue((1 == actual.sel(method='nearest', **{'lon': 20.7, 'lat': 6.15})['first']).all())
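
In this test, np.ones([180, 360, 6]) fills both variables with the constant 1, so the assertions can distinguish masked cells (NaN) from untouched cells (exactly 1). The same idea with plain numpy, on a hypothetical 10x10 grid that is not part of the test:

import numpy as np

field = np.ones((10, 10))               # constant test field, like 'first' above
inside = np.zeros((10, 10), dtype=bool)
inside[:5, :5] = True                   # stand-in for the polygon interior
field[~inside] = np.nan                 # mask everything outside
assert np.isnan(field[7, 7])            # outside the polygon: NaN
assert field[2, 2] == 1.0               # inside: still exactly 1
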
github peterwittek / ncpol2sdpa / examples / max_cut.py (View on GitHub)
# -*- coding: utf-8 -*-
"""
A polynomial optimization problem of commutative variables. It is mentioned in
Section 5.12 of the following paper:

Henrion, D.; Lasserre, J. & Löfberg, J. GloptiPoly 3: moments, optimization and
semidefinite programming. Optimization Methods & Software, 2009, 24, 761-779

Created on Thu May 15 12:12:40 2014

@author: wittek
"""
import numpy as np
from ncpol2sdpa import SdpRelaxation, generate_variables

W = np.diag(np.ones(8), 1) + np.diag(np.ones(7), 2) + np.diag([1, 1], 7) + \
    np.diag([1], 8)
W = W + W.T
n = len(W)
e = np.ones(n)
Q = (np.diag(np.dot(e.T, W)) - W) / 4

x = generate_variables(n, commutative=True)
equalities = [xi ** 2 - 1 for xi in x]

objective = -np.dot(x, np.dot(Q, np.transpose(x)))

level = 1

sdpRelaxation = SdpRelaxation(x)
sdpRelaxation.get_relaxation(level, objective=objective, equalities=equalities,
                             removeequalities=True)
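
np.ones does double duty in this example: np.diag(np.ones(k), offset) lays a run of ones along the offset-th diagonal of the weight matrix W, and e = np.ones(n) is the all-ones vector whose product with W collects the row sums used in Q. The diagonal trick in isolation:

import numpy as np

A = np.diag(np.ones(3), 1)  # 4x4 matrix, ones on the first superdiagonal
A = A + A.T                 # symmetrize, as done for W above
# A == [[0., 1., 0., 0.],
#       [1., 0., 1., 0.],
#       [0., 1., 0., 1.],
#       [0., 0., 1., 0.]]
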
github RJT1990 / pyflux / pyflux / ssm / kalman.py (View on GitHub)
mu : float
        Constant term for measurement equation

    Returns
    ----------
    alpha : np.array
        Smoothed states

    V : np.array
        Variance of smoothed states
    """     

    # Filtering matrices
    a = np.zeros((T.shape[0],y.shape[0]+1)) 
    a[0][0] = np.mean(y[0:5]) # Initialization
    P = np.ones((a.shape[0],a.shape[0],y.shape[0]+1))*(10**7) # diffuse prior assumed
    L = np.zeros((a.shape[0],a.shape[0],y.shape[0]+1))
    K = np.zeros((a.shape[0],y.shape[0]))
    v = np.zeros(y.shape[0])
    F = np.zeros((H.shape[0],H.shape[1],y.shape[0]))

    # Smoothing matrices
    N = np.zeros((a.shape[0],a.shape[0],y.shape[0]+1))
    V = np.zeros((a.shape[0],a.shape[0],y.shape[0]+1))
    alpha = np.zeros((T.shape[0],y.shape[0]+1)) 
    r = np.zeros((T.shape[0],y.shape[0]+1)) 

    # FORWARDS (FILTERING)
    for t in range(0,y.shape[0]):
        v[t] = y[t] - np.dot(Z,a[:,t]) - mu

        F[:,:,t] = np.dot(np.dot(Z,P[:,:,t]),Z.T) + H.ravel()[0]
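
np.ones((a.shape[0], a.shape[0], y.shape[0]+1)) * (10**7) sets every initial state covariance to a large constant, the usual way to encode a diffuse prior when nothing is known about the initial state. The idiom in isolation (the shapes here are illustrative):

import numpy as np

n_states, n_obs = 2, 100
P = np.ones((n_states, n_states, n_obs + 1)) * 1e7  # "know nothing" prior covariance
# equivalent in one step: np.full((n_states, n_states, n_obs + 1), 1e7)
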
github tensorflow / transform / tensorflow_transform / analyzers.py (View on GitHub)
a_count, b_count = _pad_arrays_to_match(a.count, b.count)
    a_mean, b_mean = _pad_arrays_to_match(a.mean, b.mean)
    if self._compute_variance:
      a_variance, b_variance = _pad_arrays_to_match(a.variance, b.variance)
    if self._compute_weighted:
      a_weight, b_weight = _pad_arrays_to_match(a.weight, b.weight)

    combined_total = a_count + b_count

    # Mean and variance update formulas which are more numerically stable when
    # a and b vary in magnitude.
    if self._compute_weighted:
      combined_weights_mean = (
          a_weight + (b_count / combined_total) * (b_weight - a_weight))
    else:
      combined_weights_mean = np.ones(shape=combined_total.shape)
      b_weight = np.ones(shape=b_mean.shape)

    combined_mean = a_mean + (b_count * b_weight /
                              (combined_total * combined_weights_mean)) * (
                                  b_mean - a_mean)
    if self._compute_variance:
      # TODO(zoyahav): Add an option for weighted variance if needed.
      assert not self._compute_weighted
      combined_variance = (
          a_variance + (b_count / combined_total) * (b_variance - a_variance +
                                                     ((b_mean - combined_mean) *
                                                      (b_mean - a_mean))))
    else:
      combined_variance = np.zeros(combined_mean.shape)

    return _WeightedMeanAndVarAccumulator(combined_total, combined_mean,
                                          combined_variance,
                                          combined_weights_mean)
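
The np.ones(shape=...) calls are the key move here: when weighting is disabled, substituting all-ones weight arrays lets every weighted update formula collapse to its unweighted form without special-casing. The same trick in miniature:

import numpy as np

values = np.array([2.0, 4.0, 6.0])
weights = np.ones(values.shape)  # neutral weights
assert np.average(values, weights=weights) == values.mean()
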
github sea-boat / seq2seq_chatbot / train.py (View on GitHub)
tf.unstack(decoder_inputs, axis=1),
    cell,
    num_encoder_symbols,
    num_decoder_symbols,
    embedding_size,
    feed_previous=False
)
logits = tf.stack(results, axis=1)
print("sssss: ", logits)
loss = tf.contrib.seq2seq.sequence_loss(logits, targets=targets, weights=weights)
pred = tf.argmax(logits, axis=2)

train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

saver = tf.train.Saver()
train_weights = np.ones(shape=[batch_size, sequence_length], dtype=np.float32)
with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(model_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())
    epoch = 0
    while epoch < 5000000:
        epoch = epoch + 1
        print("epoch:", epoch)
        for step in range(0, 1):
            print("step:", step)
            train_x, train_y, train_target = loadQA()
            train_encoder_inputs = train_x[step * batch_size:step * batch_size + batch_size, :]
            train_decoder_inputs = train_y[step * batch_size:step * batch_size + batch_size, :]
            train_targets = train_target[step * batch_size:step * batch_size + batch_size, :]
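
train_weights = np.ones(shape=[batch_size, sequence_length], dtype=np.float32) weights every timestep equally in tf.contrib.seq2seq.sequence_loss. A common refinement, sketched here with a hypothetical lengths array that is not in the original, is to zero out the padded tail of each sequence:

import numpy as np

batch_size, sequence_length = 4, 10
lengths = np.array([10, 7, 3, 10])  # hypothetical true lengths per example
weights = np.ones((batch_size, sequence_length), dtype=np.float32)
for i, n in enumerate(lengths):
    weights[i, n:] = 0.0            # padded positions contribute nothing to the loss
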
github kerrickstaley / extracting-chinese-subs / main.py (View on GitHub)
def dilate_erode3(img):
  "Closes the img"
  kernel = np.ones((3, 3), np.uint8)
  img = cv2.dilate(img, kernel)
  img = cv2.erode(img, kernel)
  return img
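
np.ones((3, 3), np.uint8) is the standard way to build a square structuring element for OpenCV morphology, and dilation followed by erosion with the same kernel is by definition morphological closing, so the function above could equally be written with a single cv2.morphologyEx call:

import cv2
import numpy as np

img = np.zeros((20, 20), np.uint8)  # placeholder image
kernel = np.ones((3, 3), np.uint8)
closed = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)  # dilate, then erode
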
github GeoscienceAustralia / tcrm / TrackGenerator / TrackGenerator.py (View on GitHub)
:return: a tuple of :class:`numpy.ndarray`'s
                 The tuple consists of::

                      index - the tropical cyclone index
                      age - age of the tropical cyclone
                      lon - longitude
                      lat - latitude
                      speed
                      bearing
                      pressure
                      penv - environment pressure
                      rmax - maximum radius
        """

        index = np.ones(self.maxTimeSteps, 'f') * cycloneNumber
        dates = np.empty(self.maxTimeSteps, dtype=datetime)
        age = np.empty(self.maxTimeSteps, 'f')
        jday = np.empty(self.maxTimeSteps, 'f')
        lon = np.empty(self.maxTimeSteps, 'f')
        lat = np.empty(self.maxTimeSteps, 'f')
        speed = np.empty(self.maxTimeSteps, 'f')
        bearing = np.empty(self.maxTimeSteps, 'f')
        pressure = np.empty(self.maxTimeSteps, 'f')
        poci = np.empty(self.maxTimeSteps, 'f')
        rmax = np.empty(self.maxTimeSteps, 'f')
        land = np.empty(self.maxTimeSteps, 'i')
        dist = np.empty(self.maxTimeSteps, 'f')

        # Initialise the track
        poci_eps = normal(0., 2.5717)
        lfeps = lognorm(0.69527, -0.06146, 0.0471)
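
np.ones(self.maxTimeSteps, 'f') * cycloneNumber is an older idiom for "n float32 copies of one value", in contrast to the np.empty calls, which only allocate and leave the contents uninitialized. Both spellings of the constant fill:

import numpy as np

index = np.ones(5, 'f') * 3       # array([3., 3., 3., 3., 3.], dtype=float32)
index = np.full(5, 3, dtype='f')  # equivalent, in a single step
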
github jonathf / chaospy / chaospy / quad / collection / golub_welsch.py (View on GitHub)
Examples:
        >>> Z = chaospy.Normal()
        >>> x, w = chaospy.quad_golub_welsch(3, Z)
        >>> print(numpy.around(x, 4))
        [[-2.3344 -0.742   0.742   2.3344]]
        >>> print(numpy.around(w, 4))
        [0.0459 0.4541 0.4541 0.0459]
        >>> Z = chaospy.J(chaospy.Uniform(), chaospy.Uniform())
        >>> x, w = chaospy.quad_golub_welsch(1, Z)
        >>> print(numpy.around(x, 4))
        [[0.2113 0.2113 0.7887 0.7887]
         [0.2113 0.7887 0.2113 0.7887]]
        >>> print(numpy.around(w, 4))
        [0.25 0.25 0.25 0.25]
    """
    order = numpy.array(order)*numpy.ones(len(dist), dtype=int)+1
    _, _, coeff1, coeff2 = chaospy.quad.generate_stieltjes(
        dist, numpy.max(order), accuracy=accuracy, retall=True, **kws)

    dimensions = len(dist)
    abscisas, weights = _golub_welsch(order, coeff1, coeff2)

    if dimensions == 1:
        abscisa = numpy.reshape(abscisas, (1, order[0]))
        weight = numpy.reshape(weights, (order[0],))
    else:
        abscisa = chaospy.quad.combine(abscisas).T
        weight = numpy.prod(chaospy.quad.combine(weights), -1)

    assert len(abscisa) == dimensions
    assert len(weight) == len(abscisa.T)
    return abscisa, weight
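
numpy.array(order) * numpy.ones(len(dist), dtype=int) + 1 broadcasts a scalar order to one entry per dimension of the distribution, while an explicit per-dimension sequence passes through element-wise. The broadcast in isolation:

import numpy

dims = 2
numpy.array(3) * numpy.ones(dims, dtype=int) + 1       # array([4, 4]) from a scalar
numpy.array([3, 1]) * numpy.ones(dims, dtype=int) + 1  # array([4, 2]) per dimension
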
github Qiskit / qiskit-aqua / qiskit_aqua / algorithms / many_sample / qsvm / _qsvm_kernel_binary.py (View on GitHub)
Returns:
            numpy.ndarray: 2-D matrix, N1xN2
        """
        from ._qsvm_kernel_binary import _QSVM_Kernel_Binary

        if x2_vec is None:
            is_symmetric = True
            x2_vec = x1_vec
        else:
            is_symmetric = False

        is_statevector_sim = self.qalgo.quantum_instance.is_statevector
        measurement = not is_statevector_sim
        measurement_basis = '0' * self.num_qubits
        mat = np.ones((x1_vec.shape[0], x2_vec.shape[0]))
        num_processes = psutil.cpu_count(logical=False) if platform.system() != "Windows" else 1

        # get all to-be-computed indices
        if is_symmetric:
            mus, nus = np.triu_indices(x1_vec.shape[0], k=1)  # remove diagonal term
        else:
            mus, nus = np.indices((x1_vec.shape[0], x2_vec.shape[0]))
            mus = np.asarray(mus.flat)
            nus = np.asarray(nus.flat)

        for idx in range(0, len(mus), self.BATCH_SIZE):
            circuits = {}
            to_be_simulated_circuits = []
            with concurrent.futures.ProcessPoolExecutor(max_workers=num_processes) as executor:
                futures = {}
                for sub_idx in range(idx, min(idx + self.BATCH_SIZE, len(mus))):
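
mat = np.ones((x1_vec.shape[0], x2_vec.shape[0])) is a deliberate starting value: in the symmetric case each diagonal entry is the kernel of a vector with itself, which is 1, so only the strict upper triangle from np.triu_indices(..., k=1) needs to be computed and mirrored. The fill pattern, with a placeholder in place of the real kernel evaluation:

import numpy as np

n = 4
mat = np.ones((n, n))               # diagonal K(x, x) = 1 comes for free
mus, nus = np.triu_indices(n, k=1)  # strict upper triangle only
for i, j in zip(mus, nus):
    value = 0.5                     # placeholder for the computed kernel entry
    mat[i, j] = value
    mat[j, i] = value               # mirror into the lower triangle
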
github febert / visual_mpc / python_visual_mpc / video_prediction / tracking_model / single_point_tracking_model.py (View on GitHub)
def sample_initpoints(self, flow_map=None):
        """
        :param flow_map at first time step
        :return: batch of sample cooridnates (one for each batch example)
        """
        batch_size = self.conf['batch_size']

        if flow_map is not None:
            # per-pixel flow magnitude; assumes flow_map has shape [batch, 64, 64, 2]
            flow_magnitudes = tf.norm(flow_map, axis=-1)
            flow_magnitudes /= tf.reduce_sum(flow_magnitudes, axis=[1, 2], keep_dims=True)
            flow_magnitudes = tf.reshape(flow_magnitudes, [batch_size, -1])
            log_prob = tf.log(flow_magnitudes)
        else:
            log_prob = tf.constant(np.ones([batch_size, 64**2])/64**2)

        coords = tf.multinomial(log_prob, 1)
        coords = unravel_ind(coords, [64, 64])
        return coords
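
In the fallback branch, np.ones([batch_size, 64**2]) / 64**2 gives every one of the 4096 grid cells the same probability, so tf.multinomial samples initial points uniformly when no flow map is supplied. The distribution itself, in plain numpy:

import numpy as np

probs = np.ones(64**2) / 64**2       # uniform over the 64x64 grid
assert np.isclose(probs.sum(), 1.0)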