numpy.newaxis - Python examples

Here are examples of the Python API numpy.newaxis, taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.

145 Examples
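
Before the examples, a quick refresher on what numpy.newaxis does: using it in an index inserts a length-1 axis at that position, which is the standard way to turn a 1-D array into a column or row vector for broadcasting. A minimal sketch (the array a is illustrative):

import numpy as np

a = np.arange(3)             # shape (3,)
col = a[:, np.newaxis]       # shape (3, 1): a column vector
row = a[np.newaxis, :]       # shape (1, 3): a row vector
print(col.shape, row.shape)  # (3, 1) (1, 3)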

Example 1: _base.py (3 votes, MIT License, author alvarob96)
def softmax(X):
    """Compute the K-way softmax function inplace.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data.

    Returns
    -------
    X_new : {array-like, sparse matrix}, shape (n_samples, n_features)
        The transformed data.
    """
    tmp = X - X.max(axis=1)[:, np.newaxis]
    np.exp(tmp, out=X)
    X /= X.sum(axis=1)[:, np.newaxis]

    return X
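
The [:, np.newaxis] indexing above is what lets the per-row max and per-row sum broadcast back across each row. A standalone run of the same pattern, with made-up values:

import numpy as np

X = np.array([[1.0, 2.0, 3.0],
              [1.0, 1.0, 1.0]])
tmp = X - X.max(axis=1)[:, np.newaxis]  # per-row max, shape (2, 1), broadcasts over columns
np.exp(tmp, out=X)
X /= X.sum(axis=1)[:, np.newaxis]       # normalize each row to sum to 1
print(X.sum(axis=1))                    # [1. 1.]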

Example 2: quiver.py (3 votes, MIT License, author alvarob96)
    def set_offsets(self, xy):
        """
        Set the offsets for the barb polygons. This saves the offsets
        passed in and masks them as appropriate for the existing U/V data.
        *offsets* should be a sequence.

        ACCEPTS: sequence of pairs of floats
        """
        self.x = xy[:, 0]
        self.y = xy[:, 1]
        x, y, u, v = delete_masked_points(self.x.ravel(), self.y.ravel(),
                                          self.u, self.v)
        _check_consistent_shapes(x, y, u, v)
        xy = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
        mcollections.PolyCollection.set_offsets(self, xy)
        self.stale = True
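
The np.hstack((x[:, np.newaxis], y[:, np.newaxis])) idiom column-stacks two 1-D arrays into an (n, 2) coordinate array (np.column_stack((x, y)) is equivalent). A toy example with hypothetical coordinates:

import numpy as np

x = np.array([0.0, 1.0, 2.0])
y = np.array([3.0, 4.0, 5.0])
xy = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))  # shape (3, 2)
print(xy)
# [[0. 3.]
#  [1. 4.]
#  [2. 5.]]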

Example 3: test_coordinate_descent.py (3 votes, MIT License, author alvarob96)
def test_1d_multioutput_enet_and_multitask_enet_cv():
    X, y, _, _ = build_dataset(n_features=10)
    y = y[:, np.newaxis]
    clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf.fit(X, y[:, 0])
    clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf1.fit(X, y)
    assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
    assert_almost_equal(clf.alpha_, clf1.alpha_)
    assert_almost_equal(clf.coef_, clf1.coef_[0])
    assert_almost_equal(clf.intercept_, clf1.intercept_[0])

Example 4: test_base.py (3 votes, MIT License, author alvarob96)
def test_linear_regression_sparse(random_state=0):
    # Test that linear regression also works with sparse data
    random_state = check_random_state(random_state)
    for i in range(10):
        n = 100
        X = sparse.eye(n, n)
        beta = random_state.rand(n)
        y = X * beta[:, np.newaxis]

        ols = LinearRegression()
        ols.fit(X, y.ravel())
        assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)

        assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)

Example 5: test_base.py (3 votes, MIT License, author alvarob96)
def test_rescale_data():
    n_samples = 200
    n_features = 2

    sample_weight = 1.0 + rng.rand(n_samples)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
    rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
    rescaled_y2 = y * np.sqrt(sample_weight)
    assert_array_almost_equal(rescaled_X, rescaled_X2)
    assert_array_almost_equal(rescaled_y, rescaled_y2)
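
Multiplying X by np.sqrt(sample_weight)[:, np.newaxis] scales each row of X by its own weight via broadcasting; the same per-row-factor idiom shows up in several examples below (e.g. nk[:, np.newaxis] * xk). A standalone check with hypothetical weights:

import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(4, 2)
w = np.array([1.0, 4.0, 9.0, 16.0])
scaled = X * np.sqrt(w)[:, np.newaxis]   # row i scaled by sqrt(w[i])
print(np.allclose(scaled[2], X[2] * 3))  # True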

Example 6: linear_assignment_.py (3 votes, MIT License, author alvarob96)
def _step1(state):
    """Steps 1 and 2 in the Wikipedia page."""

    # Step1: For each row of the matrix, find the smallest element and
    # subtract it from every element in its row.
    state.C -= state.C.min(axis=1)[:, np.newaxis]
    # Step2: Find a zero (Z) in the resulting matrix. If there is no
    # starred zero in its row or column, star Z. Repeat for each element
    # in the matrix.
    for i, j in zip(*np.where(state.C == 0)):
        if state.col_uncovered[j] and state.row_uncovered[i]:
            state.marked[i, j] = 1
            state.col_uncovered[j] = False
            state.row_uncovered[i] = False

    state._clear_covers()
    return _step3

Example 7: test_dict_learning.py (3 votes, MIT License, author alvarob96)
def test_dict_learning_nonzero_coefs():
    n_components = 4
    dico = DictionaryLearning(n_components, transform_algorithm='lars',
                              transform_n_nonzero_coefs=3, random_state=0)
    code = dico.fit(X).transform(X[np.newaxis, 1])
    assert_true(len(np.flatnonzero(code)) == 3)

    dico.set_params(transform_algorithm='omp')
    code = dico.transform(X[np.newaxis, 1])
    assert_equal(len(np.flatnonzero(code)), 3)
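
The X[np.newaxis, 1] indexing mixes np.newaxis with an integer index: it prepends a length-1 axis and selects row 1, producing a (1, n_features) array, the 2-D single-sample shape scikit-learn transformers expect (equivalent to X[1:2]). In isolation:

import numpy as np

X = np.arange(12.0).reshape(4, 3)
sample = X[np.newaxis, 1]    # row 1 as a (1, 3) array; same as X[1:2]
print(sample.shape)          # (1, 3)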

Example 8: calibration.py (3 votes, MIT License, author alvarob96)
    def _preproc(self, X):
        n_classes = len(self.classes_)
        if hasattr(self.base_estimator, "decision_function"):
            df = self.base_estimator.decision_function(X)
            if df.ndim == 1:
                df = df[:, np.newaxis]
        elif hasattr(self.base_estimator, "predict_proba"):
            df = self.base_estimator.predict_proba(X)
            if n_classes == 2:
                df = df[:, 1:]
        else:
            raise RuntimeError('classifier has no decision_function or '
                               'predict_proba method.')

        idx_pos_class = self.label_encoder_.\
            transform(self.base_estimator.classes_)

        return df, idx_pos_class

Example 9: bayesian_mixture.py (3 votes, MIT License, author alvarob96)
    def _estimate_means(self, nk, xk):
        """Estimate the parameters of the Gaussian distribution.

        Parameters
        ----------
        nk : array-like, shape (n_components,)

        xk : array-like, shape (n_components, n_features)
        """
        self.mean_precision_ = self.mean_precision_prior_ + nk
        self.means_ = ((self.mean_precision_prior_ * self.mean_prior_ +
                        nk[:, np.newaxis] * xk) /
                       self.mean_precision_[:, np.newaxis])

Example 10: test_weight_boosting.py (3 votes, MIT License, author alvarob96)
def test_importances():
    # Check variable importances.
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=1)

    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg)

        clf.fit(X, y)
        importances = clf.feature_importances_

        assert_equal(importances.shape[0], 10)
        assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
                     True)
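
The final assertion uses np.newaxis for an all-pairs comparison: importances[:3, np.newaxis] has shape (3, 1), so comparing it with the (7,)-shaped importances[3:] broadcasts to a (3, 7) boolean matrix that checks every informative feature against every noise feature. The same trick on toy data:

import numpy as np

a = np.array([5.0, 4.0, 3.0])
b = np.array([2.0, 1.0])
pairwise = a[:, np.newaxis] >= b   # shape (3, 2): a[i] >= b[j]
print(pairwise.all())              # True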