numpy.atleast_2d - python examples

Here are examples of the Python API `numpy.atleast_2d` taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.

145 Examples

3 View Complete Implementation : neuralnetwork.py
Copyright GNU General Public License v3.0
Author : Abhs9
    def predict(self, X, add_bias=True):
        """Run a forward pass through the network and return the prediction.

        Parameters
        ----------
        X : array-like
            Input feature vector(s); promoted to 2-D with ``np.atleast_2d``.
        add_bias : bool, optional
            If True (default), append a column of ones so each sample carries
            a bias term.

        Returns
        -------
        numpy.ndarray
            Sigmoid activations of the final layer.
        """
        # Initialise the output prediction as the input features. This value
        # will be forward-propagated through the network.
        p = np.atleast_2d(X)

        # Check to see if the bias column should be added
        if add_bias:
            # Insert a column of 1's as the last entry in the feature matrix
            p = np.c_[p, np.ones((p.shape[0]))]

        # Forward pass: iterate the weight matrices directly instead of
        # materialising an index array with np.arange just to index self.W
        for weights in self.W:
            p = self.sigmoid(np.dot(p, weights))

        # Return the predicted value
        return p

3 View Complete Implementation : perceptron.py
Copyright GNU General Public License v3.0
Author : Abhs9
    def predict(self, X, add_bias=True):
        """Return the step-activated perceptron output for input X.

        X is promoted to a 2-D feature matrix; when ``add_bias`` is True a
        trailing column of ones is appended so the last weight acts as bias.
        """
        # Promote 1-D input to a 2-D feature matrix
        features = np.atleast_2d(X)

        if add_bias:
            # Append a trailing column of ones for the bias term
            bias_col = np.ones((features.shape[0], 1))
            features = np.hstack([features, bias_col])

        # Pass the linear combination through the step activation
        return self.step(features.dot(self.W))

3 View Complete Implementation : perceptron.py
Copyright GNU General Public License v3.0
Author : Abhs9
    def predict(self, X, add_bias=True):
        """Return the step-activated perceptron output for input X.

        X is promoted to a 2-D feature matrix; when ``add_bias`` is True a
        trailing column of ones is appended so the last weight acts as bias.
        """
        # Promote 1-D input to a 2-D feature matrix
        features = np.atleast_2d(X)

        if add_bias:
            # Append a trailing column of ones for the bias term
            bias_col = np.ones((features.shape[0], 1))
            features = np.hstack([features, bias_col])

        # Pass the linear combination through the step activation
        return self.step(features.dot(self.W))

3 View Complete Implementation : neuralnetwork.py
Copyright GNU General Public License v3.0
Author : Abhs9
    def predict(self, X, add_bias=True):
        """Run a forward pass through the network and return the prediction.

        Parameters
        ----------
        X : array-like
            Input feature vector(s); promoted to 2-D with ``np.atleast_2d``.
        add_bias : bool, optional
            If True (default), append a column of ones so each sample carries
            a bias term.

        Returns
        -------
        numpy.ndarray
            Sigmoid activations of the final layer.
        """
        # Initialise the output prediction as the input features. This value
        # will be forward-propagated through the network.
        p = np.atleast_2d(X)

        # Check to see if the bias column should be added
        if add_bias:
            # Insert a column of 1's as the last entry in the feature matrix
            p = np.c_[p, np.ones((p.shape[0]))]

        # Forward pass: iterate the weight matrices directly instead of
        # materialising an index array with np.arange just to index self.W
        for weights in self.W:
            p = self.sigmoid(np.dot(p, weights))

        # Return the predicted value
        return p

3 View Complete Implementation : perceptron.py
Copyright GNU General Public License v3.0
Author : Abhs9
    def predict(self, X, add_bias=True):
        """Return the step-activated perceptron output for input X.

        X is promoted to a 2-D feature matrix; when ``add_bias`` is True a
        trailing column of ones is appended so the last weight acts as bias.
        """
        # Promote 1-D input to a 2-D feature matrix
        features = np.atleast_2d(X)

        if add_bias:
            # Append a trailing column of ones for the bias term
            bias_col = np.ones((features.shape[0], 1))
            features = np.hstack([features, bias_col])

        # Pass the linear combination through the step activation
        return self.step(features.dot(self.W))

3 View Complete Implementation : perceptron.py
Copyright GNU General Public License v3.0
Author : Abhs9
    def predict(self, X, add_bias=True):
        """Return the step-activated perceptron output for input X.

        X is promoted to a 2-D feature matrix; when ``add_bias`` is True a
        trailing column of ones is appended so the last weight acts as bias.
        """
        # Promote 1-D input to a 2-D feature matrix
        features = np.atleast_2d(X)

        if add_bias:
            # Append a trailing column of ones for the bias term
            bias_col = np.ones((features.shape[0], 1))
            features = np.hstack([features, bias_col])

        # Pass the linear combination through the step activation
        return self.step(features.dot(self.W))

3 View Complete Implementation : perceptron.py
Copyright GNU General Public License v3.0
Author : Abhs9
    def predict(self, X, add_bias=True):
        """Return the step-activated perceptron output for input X.

        X is promoted to a 2-D feature matrix; when ``add_bias`` is True a
        trailing column of ones is appended so the last weight acts as bias.
        """
        # Promote 1-D input to a 2-D feature matrix
        features = np.atleast_2d(X)

        if add_bias:
            # Append a trailing column of ones for the bias term
            bias_col = np.ones((features.shape[0], 1))
            features = np.hstack([features, bias_col])

        # Pass the linear combination through the step activation
        return self.step(features.dot(self.W))

3 View Complete Implementation : perceptron.py
Copyright GNU General Public License v3.0
Author : Abhs9
    def predict(self, X, add_bias=True):
        """Return the step-activated perceptron output for input X.

        X is promoted to a 2-D feature matrix; when ``add_bias`` is True a
        trailing column of ones is appended so the last weight acts as bias.
        """
        # Promote 1-D input to a 2-D feature matrix
        features = np.atleast_2d(X)

        if add_bias:
            # Append a trailing column of ones for the bias term
            bias_col = np.ones((features.shape[0], 1))
            features = np.hstack([features, bias_col])

        # Pass the linear combination through the step activation
        return self.step(features.dot(self.W))

3 View Complete Implementation : neuralnetwork.py
Copyright GNU General Public License v3.0
Author : Abhs9
    def predict(self, X, add_bias=True):
        """Run a forward pass through the network and return the prediction.

        Parameters
        ----------
        X : array-like
            Input feature vector(s); promoted to 2-D with ``np.atleast_2d``.
        add_bias : bool, optional
            If True (default), append a column of ones so each sample carries
            a bias term.

        Returns
        -------
        numpy.ndarray
            Sigmoid activations of the final layer.
        """
        # Initialise the output prediction as the input features. This value
        # will be forward-propagated through the network.
        p = np.atleast_2d(X)

        # Check to see if the bias column should be added
        if add_bias:
            # Insert a column of 1's as the last entry in the feature matrix
            p = np.c_[p, np.ones((p.shape[0]))]

        # Forward pass: iterate the weight matrices directly instead of
        # materialising an index array with np.arange just to index self.W
        for weights in self.W:
            p = self.sigmoid(np.dot(p, weights))

        # Return the predicted value
        return p

3 View Complete Implementation : neuralnetwork.py
Copyright GNU General Public License v3.0
Author : Abhs9
    def predict(self, X, add_bias=True):
        """Run a forward pass through the network and return the prediction.

        Parameters
        ----------
        X : array-like
            Input feature vector(s); promoted to 2-D with ``np.atleast_2d``.
        add_bias : bool, optional
            If True (default), append a column of ones so each sample carries
            a bias term.

        Returns
        -------
        numpy.ndarray
            Sigmoid activations of the final layer.
        """
        # Initialise the output prediction as the input features. This value
        # will be forward-propagated through the network.
        p = np.atleast_2d(X)

        # Check to see if the bias column should be added
        if add_bias:
            # Insert a column of 1's as the last entry in the feature matrix
            p = np.c_[p, np.ones((p.shape[0]))]

        # Forward pass: iterate the weight matrices directly instead of
        # materialising an index array with np.arange just to index self.W
        for weights in self.W:
            p = self.sigmoid(np.dot(p, weights))

        # Return the predicted value
        return p