numpy.matmul - Python examples

Here are examples of the Python API numpy.matmul, taken from open source projects.

145 Examples

From numpy.py (Author: rlgraph, License: Apache 2.0)
def dense_layer(x, weights, biases=None):
    """
    Calculates the outputs of a dense layer given weights/biases and an input.

    Args:
        x (np.ndarray): The input to the dense layer.
        weights (np.ndarray): The weights matrix.
        biases (Optional[np.ndarray]): The biases vector. All 0s if None.

    Returns:
        The dense layer's output.
    """
    return np.matmul(x, weights) + (0.0 if biases is None else biases)
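
A minimal usage sketch (the shapes and values below are made up for illustration, not taken from the project):

import numpy as np

x = np.ones((2, 3))                     # batch of 2, 3 in-nodes
weights = np.arange(6.0).reshape(3, 2)  # 3 in-nodes -> 2 out-nodes
biases = np.array([0.5, -0.5])
out = dense_layer(x, weights, biases)   # shape (2, 2)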

From test_dqn_agent_functionality.py (Author: rlgraph, License: Apache 2.0)
    @staticmethod
    def _helper_get_q_values(input_, matrix1, matrix2):
        """
        Calculates the q-values for a given simple 1-hidden 1-action-layer (both linear w/o biases) setup.

        Args:
            input_ (np.ndarray): The input array (batch x in-nodes).
            matrix1 (np.ndarray): The weights matrix of the hidden layer.
            matrix2 (np.ndarray): The weights matrix of the action-layer.

        Returns:
            np.ndarray: The calculated q-values.
        """
        # Simple NN implementation.
        nn_output = np.matmul(np.matmul(input_, matrix1), matrix2)
        # Simple dueling layer implementation.
        state_values = np.expand_dims(nn_output[:, 0], axis=-1)
        q_values = state_values + nn_output[:, 1:] - np.mean(nn_output[:, 1:], axis=-1, keepdims=True)
        return q_values
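
A shape-only sketch of how this might be called (all sizes are made up): with a batch of 2, 4 in-nodes, 3 hidden nodes, and 3 action-layer outputs (column 0 is the state value, columns 1-2 are advantages), the q-values come out with shape (2, 2).

import numpy as np

rng = np.random.default_rng(0)
input_ = rng.normal(size=(2, 4))   # batch x in-nodes
matrix1 = rng.normal(size=(4, 3))  # hidden-layer weights
matrix2 = rng.normal(size=(3, 3))  # action-layer weights
# The staticmethod above, treated as a plain function for this sketch:
q_values = _helper_get_q_values(input_, matrix1, matrix2)  # shape (2, 2)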

From test_optimizer_integration.py (Author: RaviSoji, License: Apache 2.0)
def test_A(maximum_likelihood_parameters, data_dict):
    dim = data_dict['data'].shape[-1]
    A = maximum_likelihood_parameters['A']

    assert len(A.shape) == 2
    assert A.shape[0] == dim
    assert A.shape[0] == A.shape[1]

    actual_Phi_w = np.matmul(A, A.T)
    assert_allclose(actual_Phi_w, data_dict['Phi_w'], rtol=1)
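
A @ A.T is a natural way to parameterize a covariance such as Phi_w because the product is always symmetric and positive semidefinite; a minimal check with a stand-in A (not from the project):

import numpy as np

A = np.random.default_rng(0).normal(size=(4, 4))
P = np.matmul(A, A.T)                           # same as A @ A.T
assert np.allclose(P, P.T)                      # symmetric
assert np.all(np.linalg.eigvalsh(P) >= -1e-12)  # PSD up to round-off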

From neurodecoding.py (Author: nchopin, License: MIT)
    def diff_ft(self, xt, yt):
        """First and second derivatives (wrt x_t) of log-density of Y_t|X_t=xt
        """
        a, b = self.a, self.b
        ex = np.exp(a + np.matmul(b, xt))  # shape = (dy,)
        grad = (-np.sum(ex[:, np.newaxis] * b, axis=0)
                + np.sum(yt.flatten()[:, np.newaxis] * b, axis=0))  # TODO: flatten
        hess = np.zeros((self.dx, self.dx))
        for k in range(self.dy):
            hess -= ex[k] * np.outer(b[k, :], b[k, :])
        return grad, hess
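
The Hessian loop computes -sum_k ex[k] * outer(b[k], b[k]), which can also be written as a single einsum; a self-contained equivalence check with stand-in values:

import numpy as np

rng = np.random.default_rng(0)
dy, dx = 5, 3
b = rng.normal(size=(dy, dx))
ex = np.exp(rng.normal(size=dy))
hess_loop = np.zeros((dx, dx))
for k in range(dy):
    hess_loop -= ex[k] * np.outer(b[k, :], b[k, :])
hess_vec = -np.einsum('k,ki,kj->ij', ex, b, b)  # vectorized equivalent
assert np.allclose(hess_loop, hess_vec)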

From test_optimizer_integration.py (Author: RaviSoji, License: Apache 2.0)
def test_Psi(maximum_likelihood_parameters, data_dict):
    dim = data_dict['data'].shape[-1]
    K = len(data_dict['means'])  

    relevant_U_dims = maximum_likelihood_parameters['relevant_U_dims']
    Psi = maximum_likelihood_parameters['Psi']

    assert len(Psi.shape) == 2
    assert Psi.shape[0] == dim
    assert Psi.shape[0] == Psi.shape[1]
    assert (Psi.diagonal() != 0).sum() == K - 1

    inv_A = maximum_likelihood_parameters['inv_A']
    Phi_b = data_dict['Phi_b']

    actual = np.matmul(np.matmul(inv_A, Phi_b), inv_A.T)
    actual = actual.diagonal()[relevant_U_dims]
    expected = Psi.diagonal()[relevant_U_dims]
    assert_allclose(actual, expected, rtol=4.5)
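
The chained np.matmul calls can equivalently be written with the @ operator, or with np.linalg.multi_dot, which picks an efficient multiplication order; a sketch with stand-in matrices:

import numpy as np

rng = np.random.default_rng(0)
inv_A = rng.normal(size=(4, 4))
Phi_b = rng.normal(size=(4, 4))
a1 = np.matmul(np.matmul(inv_A, Phi_b), inv_A.T)
a2 = inv_A @ Phi_b @ inv_A.T                       # operator form
a3 = np.linalg.multi_dot([inv_A, Phi_b, inv_A.T])  # optimized order
assert np.allclose(a1, a2) and np.allclose(a1, a3)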

From test_model_inference.py (Author: RaviSoji, License: Apache 2.0)
def test_A_recovers_Phi_w():
    def calc_error(truth_dict):
        model = plda.Model(truth_dict['data'], truth_dict['labels'])

        expected = truth_dict['Phi_w']
        predicted = np.matmul(model.A, model.A.T)

        error = calc_mean_squared_error(expected, predicted, as_log=True)

        return error

    n_ks = [10, 100, 1000]  # List of sample sizes.

    np.random.seed(1234)
    assert_error_falls_as_n_increases(calc_error, K=2, D=2, n_k_list=n_ks)
    assert_error_falls_as_n_increases(calc_error, K=100, D=100, n_k_list=n_ks)

From hmm.py (Author: nchopin, License: MIT)
    def pred_step(self):
        if self.filt:
            p = np.matmul(self.filt[-1], self.hmm.trans_mat)
        else:
            p = self.hmm.init_dist
        self.pred.append(p)
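
The prediction step is just a row vector times the transition matrix: p_pred[j] = sum_i p_filt[i] * trans_mat[i, j]. A two-state numeric sketch (values made up):

import numpy as np

trans_mat = np.array([[0.9, 0.1],
                      [0.2, 0.8]])     # rows sum to 1
p_filt = np.array([0.5, 0.5])
p_pred = np.matmul(p_filt, trans_mat)  # array([0.55, 0.45])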

From compare_mcmc_samplers_stochvol.py (Author: nchopin, License: MIT)
    def h(self, x, y):
        gy = self.grad_log_lik(y)
        lhs = x - self.tod * np.matmul(self.A, y + (0.25 * self.delta) * gy)
        rhs = linalg.solve(self.tod * self.A + np.eye(self.T), gy)
        # TODO pre-computing the inverse here may speed things up
        return np.dot(lhs, rhs)
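
On the TODO: the matrix passed to linalg.solve, tod * A + I, does not depend on the arguments of h, so it could be factored once and reused across calls. A sketch assuming linalg is scipy.linalg (stand-in values, not the project's setup):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
T = 4
M = rng.normal(size=(T, T)) + T * np.eye(T)  # stand-in for tod * A + I
gy = rng.normal(size=T)

lu_piv = linalg.lu_factor(M)                 # factor once, e.g. in __init__
rhs = linalg.lu_solve(lu_piv, gy)            # cheap per-call solve
assert np.allclose(rhs, linalg.solve(M, gy))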

From msd.py (Author: deepmodeling, License: LGPL v3.0)
def _msd(coords, cells, pbc_shift, begin):
    nframes = cells.shape[0]
    natoms = coords.shape[1]
    ff = begin
    # Unwrapped coordinates of the reference frame `begin`; this is never
    # updated in the loop, so all displacements below are measured against
    # that frame, as MSD requires.
    prev_coord = coords[ff] + np.matmul(pbc_shift[ff], cells[ff])
    msds = [0.]
    for ff in range(begin + 1, nframes):
        curr_coord = coords[ff] + np.matmul(pbc_shift[ff], cells[ff])
        diff_coord = curr_coord - prev_coord
        msds.append(np.sum(diff_coord * diff_coord) / natoms)
    return np.array(msds)
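
A minimal usage sketch with random data (the shapes are assumptions: frames x atoms x 3 coordinates, one 3 x 3 cell matrix per frame):

import numpy as np

rng = np.random.default_rng(0)
nframes, natoms = 5, 10
coords = rng.normal(size=(nframes, natoms, 3))
cells = np.tile(np.eye(3), (nframes, 1, 1))    # one box per frame
pbc_shift = rng.integers(-1, 2, size=(nframes, natoms, 3)).astype(float)
msd = _msd(coords, cells, pbc_shift, begin=0)  # length nframes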

From unit_tests_numpy.py (Author: getkeops, License: MIT)
    @unittest.skipIf(not pykeops.gpu_available, 'No GPU detected. Skip tests.')
    ############################################################
    def test_gaussian_conv_specific(self):
    ############################################################
        from pykeops.numpy.convolutions.radial_kernel import RadialKernelConv
        for k, t in itertools.product(['gaussian', 'laplacian', 'cauchy', 'inverse_multiquadric'], self.type_to_test):
            with self.subTest(k=k):
                # Call the CUDA kernel.
                my_radial_conv = RadialKernelConv(t)
                gamma = my_radial_conv(self.x.astype(t), self.y.astype(t), self.b.astype(t), self.sigma.astype(t), kernel=k)

                # NumPy reference version.
                gamma_py = np.matmul(np_kernel(self.x, self.y, self.sigma, kernel=k), self.b)

                # Compare the outputs.
                self.assertTrue(np.allclose(gamma, gamma_py, atol=1e-6))
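
For reference, the NumPy side builds a dense kernel matrix and multiplies it by b. A sketch of what np_kernel presumably computes in the Gaussian case (the helper itself is not shown above, so treat the kernel formula as an assumption):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(5, 3))
y = rng.normal(size=(7, 3))
b = rng.normal(size=(7, 2))
sigma = 0.5
sqdists = np.sum((x[:, None, :] - y[None, :, :]) ** 2, axis=-1)  # (5, 7)
K = np.exp(-sqdists / sigma ** 2)                                # Gaussian kernel matrix
gamma_py = np.matmul(K, b)                                       # (5, 2)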