Here are examples of the Python API numpy.array.reshape, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
145 Examples
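For context, here is a minimal, self-contained sketch of the reshape pattern that recurs throughout the examples below: a length-3 per-channel mean/std is reshaped to (3, 1, 1) so it broadcasts over a channels-first (C, H, W) image (or to (1, 1, 3) for channels-last). The image used here is a random placeholder, not data from any of the listed projects.

import numpy as np

# ImageNet per-channel statistics, reshaped to broadcast over a (C, H, W) image
mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))

image = np.random.rand(3, 224, 224)   # placeholder CHW image with values in [0, 1]
normalized = (image - mean) / std     # (3, 1, 1) broadcasts across height and width

print(mean.shape, normalized.shape)   # (3, 1, 1) (3, 224, 224)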
3
View Complete Implementation : test_attack_Gaussian_blur.py
Copyright Apache License 2.0
Author : advboxes
def test_untargeted_densenet121(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    # np, Attack, and Misclassification are imported at module level in the original test file
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.densenet121(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial
3
View Complete Implementation : test_attack_BlendedUniformNoiseAttack.py
Copyright Apache License 2.0
Author : advboxes
def test_untargeted_resnet50(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.resnet50(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial
3
View Complete Implementation : test_attack_carlini_wagner.py
Copyright Apache License 2.0
Author : advboxes
def test_untargeted_resnet18(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    resnet18 = models.resnet18(pretrained=True).eval()
    if torch.cuda.is_available():
        resnet18 = resnet18.cuda()
    model = PyTorchModel(
        resnet18, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial = attack(image, label, unpack=True)
3
View Complete Implementation : test_attack_BlendedUniformNoiseAttack.py
Copyright Apache License 2.0
Author : advboxes
def test_untargeted_AlexNet(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.alexnet(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial
3
View Complete Implementation : test_attack_SaltAndPepperNoiseAttack.py
Copyright Apache License 2.0
Author : advboxes
def test_untargeted_resnet50(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.resnet50(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial
3
View Complete Implementation : test_attack_SaltAndPepperNoiseAttack.py
Copyright Apache License 2.0
Author : advboxes
def test_untargeted_vgg16(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.vgg16(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial
3
View Complete Implementation : test_attack_AdditiveGaussianNoiseAttack.py
Copyright Apache License 2.0
Author : advboxes
def test_untargeted_resnet50(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.resnet50(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial
3
View Complete Implementation : test_attack_AdditiveUniformNoiseAttack.py
Copyright Apache License 2.0
Author : advboxes
def test_untargeted_inception_v3(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.inception_v3(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial
3
View Complete Implementation : test_attack_MotionBlurAttack.py
Copyright Apache License 2.0
Author : advboxes
def test_untargeted_resnet50(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.resnet50(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial
3
View Complete Implementation : test_attack_MotionBlurAttack.py
Copyright Apache License 2.0
Author : advboxes
def test_untargeted_Xception(image, label=None):
    import keras
    from perceptron.models.classification.keras import KerasModel
    # channels-last layout here, so the statistics are reshaped to (1, 1, 3)
    mean = np.array([0.485, 0.456, 0.406]).reshape((1, 1, 3))
    std = np.array([0.229, 0.224, 0.225]).reshape((1, 1, 3))
    model_keras = keras.applications.xception.Xception(weights='imagenet')
    model = KerasModel(model_keras, bounds=(0, 1), preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial