%load_ext autoreload
%autoreload 2
%matplotlib inline
SEED = 239
import os
import numpy as np
np.random.seed(SEED)
np.set_printoptions(precision=4)
import torch
torch.manual_seed(SEED)
if torch.cuda.is_available():
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
import matplotlib.collections as mcol
import matplotlib.transforms as mtransforms
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial.transform import Rotation as R
from scipy.special import softmax
from sklearn.metrics import confusion_matrix
from tqdm import tqdm
from models import PointCMLP, SteerableModel
from utils import (plot_confusion_matrix,
get_tetris_data, plot_shapes,
visualize_skeleton, get_edges,
score, build_mlgp,
construct_filter_banks, unembed_points,
transform_parameters, build_steerable_model,
random_axis_angle, torch_rotation_matrix, entropy)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.__version__, device
('1.8.1+cu102', device(type='cuda'))
We build the steerable spherical classifier in the following steps:
Step 1. Train the ancestor MLGP.
Step 2. Transform the hidden unit parameters into filter banks.
Step 3. Fix the learned parameters and add the interpolation coefficients $v^k$ as learnable parameters to fulfill the steerability constraint $\rightarrow$ Steerable spherical classifier.
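For orientation, the generic steerability requirement (in the sense of Freeman and Adelson's steerable filters) states that the response to any rotated input must be expressible as a fixed linear combination of a finite bank of basis responses,

$$ f(R\,x) = \sum_{k=1}^{4} v^k(R)\, f_k(x), $$

where $f_k$ denotes the response of the $k$-th filter-bank sphere. The paper's constraint (13) specializes this idea to the tetrahedral filter banks constructed in Step 2; the display above is the generic form, not a quotation of (13).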
data_path = 'utkinect_skeletons_data/'
action_to_label = {
'sitDown': 0,
'throw': 1,
'carry': 2,
'push': 3,
'waveHands': 4,
'walk': 5,
'clapHands': 6,
'pull': 7,
'pickUp': 8,
'standUp': 9
}
Xtrain = np.load(os.path.join(data_path, 'Xtrain.npy'))
Ytrain = np.load(os.path.join(data_path, 'Ytrain.npy'))
Xval = np.load(os.path.join(data_path, 'Xval.npy'))
Yval = np.load(os.path.join(data_path, 'Yval.npy'))
Xtest = np.load(os.path.join(data_path, 'Xtest.npy'))
Ytest = np.load(os.path.join(data_path, 'Ytest.npy'))
# shuffle the data:
idcs = np.arange(len(Xtrain))
np.random.shuffle(idcs)
Xtrain = Xtrain[idcs]
Ytrain = Ytrain[idcs]
# sanity check:
idx = np.random.choice(len(Xtrain))
skeleton = Xtrain[idx]
edges = get_edges(skeleton)
visualize_skeleton(skeleton.T, edges, size=100, show_grid=True, azim=0)
label_to_action = {v: k for k, v in action_to_label.items()}
plt.title(f'skeleton for action "{label_to_action[Ytrain[idx]]}" ({Ytrain[idx]})')
Text(0.5, 0.92, 'skeleton for action "walk" (5)')
Xtrain = torch.from_numpy(Xtrain).float().to(device)
Ytrain = torch.from_numpy(Ytrain).to(device)
Xval = torch.from_numpy(Xval).float().to(device)
Yval = torch.from_numpy(Yval).to(device)
Xtest = torch.from_numpy(Xtest).float().to(device)
Ytest = torch.from_numpy(Ytest).to(device)
print('Xtrain.shape, Ytrain.shape:', Xtrain.shape, Ytrain.shape)
print('Xval.shape, Yval.shape:', Xval.shape, Yval.shape)
print('Xtest.shape, Ytest.shape:', Xtest.shape, Ytest.shape)
Xtrain.shape, Ytrain.shape: torch.Size([2295, 20, 3]) torch.Size([2295])
Xval.shape, Yval.shape: torch.Size([670, 20, 3]) torch.Size([670])
Xtest.shape, Ytest.shape: torch.Size([3062, 20, 3]) torch.Size([3062])
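As an optional sanity check (not part of the original run, and assuming integer class labels), the class balance of the training split can be inspected directly:

# number of training samples per action class (labels 0-9):
print(torch.bincount(Ytrain, minlength=10))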
N_GEOMETRIC_NEURONS = 12
OUTPUT_DIM = 10 # len(set(Ytrain.numpy()))
# set the seed here:
torch.manual_seed(SEED)
# instantiate the model:
model = build_mlgp(input_shape=Xtrain.shape[1:], output_dim=OUTPUT_DIM, hidden_layer_sizes=[N_GEOMETRIC_NEURONS], bias=False)
print(model)
print('total number of trainable parameters:', sum([np.prod(p.size()) for p in filter(lambda p: p.requires_grad, model.parameters())]))
print()
model = model.float().to(device)
# define the loss and optimizer:
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
epochs = 10000
# train the model:
for i in range(epochs):
    y_pred = model(Xtrain)
    loss = criterion(y_pred, Ytrain)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    acc = score(y_pred.detach(), Ytrain)
    if i % 500 == 0:
        y_val_pred = model(Xval)
        val_loss = criterion(y_val_pred, Yval)
        val_acc = score(y_val_pred.detach(), Yval)
        print('epoch: %d, loss: %.3f, acc: %.3f, val_loss: %.3f, val_acc: %.3f' % (i, loss.item(), acc, val_loss.item(), val_acc))
print('epoch: %d, loss: %.3f, acc: %.3f, val_loss: %.3f, val_acc: %.3f' % (i, loss.item(), acc, val_loss.item(), val_acc))
model: MLGP
PointCMLP(
  (hidden_layers): ModuleList(
    (0): Linear(in_features=100, out_features=12, bias=False)
  )
  (out_layer): Linear(in_features=14, out_features=10, bias=False)
)
total number of trainable parameters: 1340

epoch: 0, loss: 2.313, acc: 0.156, val_loss: 2.302, val_acc: 0.152
epoch: 500, loss: 0.839, acc: 0.712, val_loss: 0.865, val_acc: 0.696
epoch: 1000, loss: 0.580, acc: 0.782, val_loss: 0.623, val_acc: 0.754
epoch: 1500, loss: 0.460, acc: 0.831, val_loss: 0.519, val_acc: 0.810
epoch: 2000, loss: 0.382, acc: 0.866, val_loss: 0.448, val_acc: 0.848
epoch: 2500, loss: 0.319, acc: 0.892, val_loss: 0.389, val_acc: 0.867
epoch: 3000, loss: 0.266, acc: 0.913, val_loss: 0.340, val_acc: 0.881
epoch: 3500, loss: 0.222, acc: 0.928, val_loss: 0.306, val_acc: 0.897
epoch: 4000, loss: 0.186, acc: 0.941, val_loss: 0.282, val_acc: 0.904
epoch: 4500, loss: 0.157, acc: 0.952, val_loss: 0.265, val_acc: 0.904
epoch: 5000, loss: 0.133, acc: 0.961, val_loss: 0.257, val_acc: 0.907
epoch: 5500, loss: 0.112, acc: 0.969, val_loss: 0.255, val_acc: 0.918
epoch: 6000, loss: 0.095, acc: 0.973, val_loss: 0.255, val_acc: 0.913
epoch: 6500, loss: 0.080, acc: 0.979, val_loss: 0.257, val_acc: 0.919
epoch: 7000, loss: 0.068, acc: 0.983, val_loss: 0.261, val_acc: 0.919
epoch: 7500, loss: 0.058, acc: 0.986, val_loss: 0.267, val_acc: 0.921
epoch: 8000, loss: 0.049, acc: 0.990, val_loss: 0.276, val_acc: 0.924
epoch: 8500, loss: 0.042, acc: 0.991, val_loss: 0.289, val_acc: 0.925
epoch: 9000, loss: 0.036, acc: 0.994, val_loss: 0.306, val_acc: 0.928
epoch: 9500, loss: 0.031, acc: 0.996, val_loss: 0.327, val_acc: 0.924
epoch: 9999, loss: 0.026, acc: 0.996, val_loss: 0.327, val_acc: 0.924
y_test_pred = model(Xtest)
test_acc = score(y_test_pred.detach(), Ytest)
print('test_acc: %.5f' % test_acc)
test_acc: 0.92880
targets = Ytest.cpu().numpy()
predictions = torch.argmax(y_test_pred.detach(), axis=1).cpu().numpy()
cm = confusion_matrix(targets, predictions)
plot_confusion_matrix(cm, classes=list(action_to_label.keys()))#, normalize=True)
Confusion matrix, without normalization
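As a small optional addition (not in the original notebook), per-class recall can be read off the confusion matrix numerically:

# diagonal / row sums = per-class recall (rows are ground-truth classes):
per_class_recall = cm.diagonal() / cm.sum(axis=1)
for action, recall in zip(action_to_label, per_class_recall):
    print('%-10s %.3f' % (action, recall))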
The main function in this step is construct_filter_banks. To demonstrate how it works, we first extract the spheres of the ancestor model's hidden layer (i.e., the geometric neurons) and then take one of them to form a filter bank:
# extract the spheres from the ancestor model:
original_state_dict = model.state_dict()
# get the geometric neuron spheres:
hidden_name = 'hidden_layers.0.weight'
hidden_spheres = original_state_dict[hidden_name] # (n_geometric_neurons x N_points*5)
# each sphere is a parameter vector of length 5;
# each geometric neuron contains a number of spheres corresponding to the number of input points
# in the point set
# reshape to (n_geometric_neurons x N_points x 5):
hidden_spheres_numpy = hidden_spheres.detach().cpu().numpy().reshape(len(hidden_spheres), -1, 5)
print('hidden_spheres_numpy.shape:', hidden_spheres_numpy.shape)
# e.g., select the third sphere from the second geometric neuron:
one_sphere = hidden_spheres_numpy[1,2,:]
print('\nS_tilde_k = ', one_sphere)
# construct a filter bank for this sphere:
init_rotation, filter_bank = construct_filter_banks(one_sphere, return_init_rotations=True)
print('\nR_O^k =\n', init_rotation, '\n\nB(S_tilde_k) =\n', filter_bank)
hidden_spheres_numpy.shape: (12, 20, 5)

S_tilde_k =  [-2.5476  0.6094  0.0804 -0.1381 -0.3713]

R_O^k =
 [[[ 0.4189 -0.6476 -0.6365]
  [ 0.7434  0.6471 -0.1692]
  [ 0.5214 -0.4023  0.7525]]]

B(S_tilde_k) =
 [[-2.5476  0.6094  0.0804 -0.1381 -0.3713]
 [ 1.2799  1.3504  1.8456 -0.1381 -0.3713]
 [ 0.298  -2.5677  0.4316 -0.1381 -0.3713]
 [ 0.9696  0.6079 -2.3576 -0.1381 -0.3713]]
We normalize, i.e., unembed, the resulting spheres to recover their Euclidean $\mathbb{R}^3$ representation: after unembedding, the first three elements of each 5-vector are the Euclidean coordinates of the corresponding sphere center:
# the centers of the filter bank spheres:
centers = unembed_points(filter_bank)
print('\nthe four sphere centers:\n', centers)
fig = plt.figure(1, figsize=(7,7))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(centers[:, 0], centers[:, 1], centers[:, 2], s=100)
plt.title('A regular tetrahedron formed by the centers of the $B(S_k)$ spheres')
plt.show()
the four sphere centers:
 [[ 6.8618 -1.6415 -0.2167]
 [-3.4475 -3.6371 -4.9711]
 [-0.8027  6.9159 -1.1624]
 [-2.6116 -1.6373  6.3501]]
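For intuition, here is a minimal sketch of an unembedding consistent with the numbers printed above (the actual unembed_points implementation in utils may differ in details), together with a check that the four centers are pairwise equidistant:

def unembed_points_sketch(spheres):
    # divide the first three components of each 5-vector by its fifth component
    # to recover the Euclidean center (e.g., -2.5476 / -0.3713 ~ 6.8618 above):
    spheres = np.atleast_2d(spheres)
    return spheres[:, :3] / spheres[:, 4:5]

centers_check = unembed_points_sketch(filter_bank)
# pairwise distances between the four centers; the off-diagonal entries
# should all be equal (~11.53), confirming a regular tetrahedron:
print(np.linalg.norm(centers_check[:, None] - centers_check[None], axis=-1))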
Step 2 is wrapped into the function transform_parameters, which:
- takes in the trained ancestor MLGP model;
- transforms its parameters: uses the hidden layer spheres to create the filter banks and keeps the output (classification) layer the same;
- returns the initial rotations $R_O^k$, the filter banks $B(\tilde{S}_k)$, and the ancestor model output layer parameters.
transformed_parameters = transform_parameters(model) # used in the experiments further down
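For intuition, a hypothetical sketch of what transform_parameters might do internally, based purely on the description above (the actual utils implementation, including its return format, may differ):

def transform_parameters_sketch(mlgp):
    # geometric-neuron spheres, reshaped to (n_neurons, n_points, 5):
    spheres = (mlgp.state_dict()['hidden_layers.0.weight']
               .detach().cpu().numpy().reshape(N_GEOMETRIC_NEURONS, -1, 5))
    init_rotations, filter_banks = [], []
    for neuron_spheres in spheres:
        for sphere in neuron_spheres:
            # one initial rotation R_O^k and one 4-sphere bank B(S_tilde_k) per sphere:
            rot, bank = construct_filter_banks(sphere, return_init_rotations=True)
            init_rotations.append(rot)
            filter_banks.append(bank)
    # the classification layer is reused unchanged:
    out_layer_params = mlgp.state_dict()['out_layer.weight'].detach().cpu()
    return init_rotations, filter_banks, out_layer_params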
Step 3 is wrapped into the build_steerable_model function, which:
- creates a steerable model with learnable interpolation coefficients $v^k$ according to constraint (13) in the paper;
- sets the remaining model parameters to those obtained in Step 2 (the filter banks and the unchanged output layer) and keeps them fixed (not updated).
# choose initial model parameters:
init_axis_angle = random_axis_angle()
# use the initial parameters and the transformed ancestor model parameters obtained in Step 2
# to build a steerable spherical classifier:
steerable_model = build_steerable_model(input_shape=Xtrain.shape[1:],
output_dim=OUTPUT_DIM,
hidden_layer_sizes=[N_GEOMETRIC_NEURONS],
init_axis_angle=init_axis_angle,
transformed_parameters=transformed_parameters,
print_hidden_layer_output=False).to(device)
print(steerable_model)
print('total number of trainable parameters:', \
sum([np.prod(p.size()) for p in filter(lambda p: p.requires_grad, steerable_model.parameters())]))
print('\ninit_axis_angle:\n', init_axis_angle)
print('\nsteerable_model.axis_angle:\n', steerable_model.axis_angle)
SteerableModel(
  (hidden_layers): ModuleList(
    (0): Linear(in_features=400, out_features=12, bias=False)
  )
  (out_layer): Linear(in_features=14, out_features=10, bias=False)
)
total number of trainable parameters: 3

init_axis_angle:
 [ 0.2288  0.0725 -0.2987]

steerable_model.axis_angle:
 Parameter containing:
tensor([ 0.2288,  0.0725, -0.2987], device='cuda:0', dtype=torch.float64,
       requires_grad=True)
We also record the ancestor model's hidden and output activations on the original (unrotated) test data via forward hooks; these serve as the ground truth in the experiments further down:
activation = {}
def get_activation(name):
    def hook(model, input, output):
        activation[name] = output.detach()
    return hook
model.hidden_layers[0].register_forward_hook(get_activation('hidden_layer'))
output = model(Xtest)
# the ground truth hidden activations -- the ancestor MLGP hidden layer output:
gt_hidden_activations = activation['hidden_layer'].detach().cpu().numpy()
# the ground truth output activations -- the ancestor MLGP model output:
gt_outs = output.detach().cpu().numpy()
# gt_hidden_activations, gt_outs
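The experiments below repeatedly convert axis-angle vectors into rotation matrices via torch_rotation_matrix. For reference, a minimal sketch of what such a helper typically computes, namely Rodrigues' formula (the utils version may differ in conventions such as dtype or batching):

def torch_rotation_matrix_sketch(axis_angle):
    # Rodrigues' formula: the rotation angle is the norm of the axis-angle
    # vector and the rotation axis is its direction.
    v = torch.as_tensor(axis_angle, dtype=torch.float64)
    theta = torch.linalg.norm(v)
    if theta < 1e-12:
        return torch.eye(3, dtype=v.dtype)
    k = v / theta
    # skew-symmetric cross-product matrix of the unit axis:
    K = torch.zeros(3, 3, dtype=v.dtype)
    K[0, 1], K[0, 2] = -k[2], k[1]
    K[1, 0], K[1, 2] = k[2], -k[0]
    K[2, 0], K[2, 1] = -k[1], k[0]
    return torch.eye(3, dtype=v.dtype) + torch.sin(theta) * K + (1 - torch.cos(theta)) * (K @ K)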
np.random.seed(SEED)
torch.manual_seed(SEED)
n_trials = 1000
# the parameter of additive uniform noise to apply to the rotated shapes:
distortions = [0.0, 0.005, 0.01, 0.02, 0.03, 0.05]
init_axis_angles = []
accs = dict() # classification accuracies for the perturbed rotated shapes
dists = dict() # L1 distances to the ground truth hidden activations
# the same for the ancestor:
ancestor_accs = dict()
ancestor_dists = dict()
for distortion in distortions:
    accs[distortion] = []
    dists[distortion] = []
    ancestor_accs[distortion] = []
    ancestor_dists[distortion] = []
    print('\ndistortion:', distortion)
    for n in range(n_trials):
        # construct a random ground truth rotation:
        init_axis_angle = random_axis_angle()
        init_axis_angles.append(init_axis_angle)
        gt_rotation = torch_rotation_matrix(init_axis_angle).float().to(device)
        # rotate the shapes with the ground truth:
        test_data = Xtest.reshape(-1, 3) @ gt_rotation.T
        test_data = test_data.reshape(Xtest.shape)
        test_label = Ytest
        # add uniform noise to the transformed shapes:
        noise = distortion * (2 * torch.rand(test_data.shape).to(device) - 1)
        test_data += noise
        # construct the steerable model with the initial axis-angle parameters:
        steerable_model = build_steerable_model(input_shape=test_data.shape[1:],
                                                output_dim=OUTPUT_DIM,
                                                hidden_layer_sizes=[N_GEOMETRIC_NEURONS],
                                                init_axis_angle=init_axis_angle,
                                                transformed_parameters=transformed_parameters,
                                                print_hidden_layer_output=False).float().to(device)
        # get the model output:
        output = steerable_model(test_data)
        ancestor_output = model(test_data)
        # compute the model accuracy for the perturbed rotated shapes:
        acc = score(output.detach(), test_label)
        accs[distortion].append(acc)
        ancestor_acc = score(ancestor_output.detach(), test_label)
        ancestor_accs[distortion].append(ancestor_acc)
        # compute the L1 distance between the hidden activations:
        hidden_activations = steerable_model.hidden_layer_activations.cpu().numpy()
        dist = np.linalg.norm(hidden_activations - gt_hidden_activations, ord=1, axis=1)
        dist = np.mean(dist)
        dists[distortion].append(dist)
        ancestor_hidden_activations = model.hidden_layer_activations.cpu().numpy()
        ancestor_dist = np.linalg.norm(ancestor_hidden_activations - gt_hidden_activations, ord=1, axis=1)
        ancestor_dist = np.mean(ancestor_dist)
        ancestor_dists[distortion].append(ancestor_dist)
        # if n % 10 == 0:
        #     print('\nexperiment #%d/%d' % (n+1, n_trials))
        #     print('\nadditive_uniform_noise:\n', noise)
        #     print('\ngt_rotation:\n', gt_rotation)
        #     print('\nacc: %.3f' % acc)
    print()
    print('ancestor_acc: %.4f +/- %.4f' % (np.mean(ancestor_accs[distortion]), np.std(ancestor_accs[distortion])))
    print('acc: %.4f +/- %.4f' % (np.mean(accs[distortion]), np.std(accs[distortion])))
    print()
    print('ancestor L1 dist: %.4f +/- %.4f' % (np.mean(ancestor_dists[distortion]), np.std(ancestor_dists[distortion])))
    print('L1 dist: %.4f +/- %.4f' % (np.mean(dists[distortion]), np.std(dists[distortion])))
    print(end='\n\n')
distortion: 0.0
ancestor_acc: 0.2515 +/- 0.2309
acc: 0.9288 +/- 0.0000
ancestor L1 dist: 52.5823 +/- 30.5734
L1 dist: 0.0000 +/- 0.0000

distortion: 0.005
ancestor_acc: 0.2461 +/- 0.2208
acc: 0.9238 +/- 0.0021
ancestor L1 dist: 51.8241 +/- 29.3599
L1 dist: 0.5296 +/- 0.0023

distortion: 0.01
ancestor_acc: 0.2435 +/- 0.2072
acc: 0.9110 +/- 0.0031
ancestor L1 dist: 50.4696 +/- 28.4650
L1 dist: 1.0595 +/- 0.0045

distortion: 0.02
ancestor_acc: 0.2349 +/- 0.2057
acc: 0.8714 +/- 0.0047
ancestor L1 dist: 52.6936 +/- 29.5572
L1 dist: 2.1188 +/- 0.0094

distortion: 0.03
ancestor_acc: 0.2431 +/- 0.2029
acc: 0.8225 +/- 0.0056
ancestor L1 dist: 51.0154 +/- 29.4376
L1 dist: 3.1791 +/- 0.0141

distortion: 0.05
ancestor_acc: 0.2275 +/- 0.1752
acc: 0.7196 +/- 0.0070
ancestor L1 dist: 51.2326 +/- 29.0233
L1 dist: 5.3041 +/- 0.0221
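In the next experiment, we refine a distorted rotation estimate by minimizing the entropy of the steerable model's predictive distribution with respect to its axis-angle parameters: a confident (low-entropy) prediction indicates a good alignment. For reference, a minimal sketch of what the entropy helper from utils plausibly computes (an assumption; the actual implementation may differ):

def entropy_sketch(output, is_logits=True):
    # Shannon entropy of the predicted class distribution, averaged over the batch:
    p = F.softmax(output, dim=1) if is_logits else output
    return -(p * torch.log(p + 1e-12)).sum(dim=1).mean()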
np.random.seed(SEED)
torch.manual_seed(SEED)
_, N, D = Xtest.shape
n_trials = 1000
epochs = 100
# print_period = 50 # for online optimization
# distortion rotation angle parameters (in degrees):
distortion_std = 10
distortion_means = [0, 5, 10, 15, 30]
# distortion_means = [5, 10]
all_results = dict()
for distortion_mean in distortion_means:
    print('\n\n\ndistortion = %d +/- %d degrees' % (distortion_mean, distortion_std))
    gt_axis_angles = []
    init_axis_angles = []
    gt_labels = []
    init_outputs = []
    init_predictions = []
    init_losses = []
    init_accs = []
    init_dists = []
    final_outputs = []
    final_predictions = []
    final_losses = []
    final_accs = []
    final_dists = []
    optimized_axis_angles = []
    for n in tqdm(range(n_trials)):
        # 1) Randomly transform the test data:
        gt_axis_angle = random_axis_angle()
        gt_axis_angles.append(gt_axis_angle)
        gt_rotation = torch_rotation_matrix(gt_axis_angle).float().to(device)
        # print('\ngt_rotation:\n', gt_rotation)
        # select a sample:
        shape_idx = np.random.choice(len(Xtest))
        gt_labels.append(Ytest[shape_idx])
        # print('\nshape_label:', Ytest[shape_idx])
        test_data = Xtest[shape_idx:shape_idx+1].reshape(-1, 3) @ gt_rotation.T
        test_data = test_data.reshape(1, N, D)
        test_label = Ytest[shape_idx:shape_idx+1]
        # 2) Initialize steerable model parameters with distorted ground truth axis-angle.
        # create a "distortion" axis-angle:
        distortion_angle = np.radians(distortion_std)*np.random.randn() + np.radians(distortion_mean)
        # print('\ndistortion_angle:', np.degrees(distortion_angle))
        distortion_axis_angle = random_axis_angle(angle=distortion_angle)
        distortion_matrix = torch_rotation_matrix(distortion_axis_angle).float()
        # by multiplying distortion_matrix with the gt_rotation matrix,
        # we can control the rotation angle randomness:
        distorted_rotation = distortion_matrix @ gt_rotation.cpu()
        init_r = R.from_matrix(distorted_rotation.cpu().numpy())
        init_axis_angle = init_r.as_rotvec()
        init_axis_angles.append(init_axis_angle)
        # construct the steerable model with init_axis_angle:
        steerable_model = build_steerable_model(input_shape=test_data.shape[1:],
                                                output_dim=OUTPUT_DIM,
                                                hidden_layer_sizes=[N_GEOMETRIC_NEURONS],
                                                init_axis_angle=init_axis_angle,
                                                transformed_parameters=transformed_parameters,
                                                print_hidden_layer_output=False).float().to(device)
        # get initial model output, hidden_activations, acc and loss:
        output = steerable_model(test_data)
        init_output = output.detach().cpu().numpy()
        init_outputs.append(init_output)
        init_dist = np.linalg.norm(softmax(init_output) - softmax(gt_outs[shape_idx]), ord=1)
        init_dists.append(init_dist)
        init_acc = score(output.detach(), test_label)
        init_accs.append(init_acc)
        init_loss = entropy(output, is_logits=True).item()
        init_losses.append(init_loss)
        # print('\ninit_acc: %.3f' % init_acc)
        # collect the initial predictions:
        init_prediction = torch.argmax(output.detach(), axis=1)
        init_predictions.append(init_prediction.cpu().numpy())
        # print('initial prediction:', init_prediction.cpu().numpy())
        # print()
        # 3) Optimize the entropy loss wrt the axis-angle parameters of the steerable model:
        optimizer = optim.Adam(steerable_model.parameters(), lr=1e-2)
        for i in range(epochs):
            # compute the entropy loss:
            output = steerable_model(test_data)
            loss = entropy(output, is_logits=True)
            # backpropagate:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # if i % print_period == 0:
            #     print('epoch: %d, loss: %.3f' % (i, loss.item()))
        # store the final data:
        final_losses.append(loss.item())
        final_output = output.detach().cpu().numpy()
        final_outputs.append(final_output)
        final_dist = np.linalg.norm(softmax(final_output) - softmax(gt_outs[shape_idx]), ord=1)
        final_dists.append(final_dist)
        final_acc = score(output.detach(), test_label)
        final_accs.append(final_acc)
        final_prediction = torch.argmax(output.detach(), axis=1)
        final_predictions.append(final_prediction.cpu().numpy())
        optimized_axis_angle = steerable_model.axis_angle.detach().cpu().numpy()
        optimized_axis_angles.append(optimized_axis_angle)
        # print('epoch: %d, loss: %.3f' % (i, loss.item()))
        # print('\nfinal_acc: %.3f' % final_acc)
        # print('final prediction:', final_prediction.cpu().numpy())
        # print('\ngt_axis_angle:\n', gt_axis_angle)
        # print('\ninit_axis_angle:\n', init_axis_angle)
        # print('optimized_axis_angle:\n', optimized_axis_angle)
    all_results[distortion_mean] = {
        'gt_axis_angles': gt_axis_angles,
        'gt_labels': gt_labels,
        'init_axis_angles': init_axis_angles,
        'init_outputs': init_outputs,
        'init_predictions': init_predictions,
        'init_losses': init_losses,
        'init_accs': init_accs,
        'init_dists': init_dists,
        'final_outputs': final_outputs,
        'final_predictions': final_predictions,
        'final_losses': final_losses,
        'final_accs': final_accs,
        'final_dists': final_dists,
        'optimized_axis_angles': optimized_axis_angles,
    }
    print('\nmean_init_acc: %.4f, mean_final_acc: %.4f'
          % (np.mean(all_results[distortion_mean]['init_accs']), np.mean(all_results[distortion_mean]['final_accs'])))
    print('\nmean_init_dist: %.4f +/- %.4f, mean_final_dist: %.4f +/- %.4f'
          % (np.mean(all_results[distortion_mean]['init_dists']), np.std(all_results[distortion_mean]['init_dists']),
             np.mean(all_results[distortion_mean]['final_dists']), np.std(all_results[distortion_mean]['final_dists'])))
distortion = 0 +/- 10 degrees
mean_init_acc: 0.7680, mean_final_acc: 0.7690
mean_init_dist: 0.1828 +/- 0.3311, mean_final_dist: 0.1958 +/- 0.3627

distortion = 5 +/- 10 degrees
mean_init_acc: 0.8000, mean_final_acc: 0.8030
mean_init_dist: 0.1719 +/- 0.3303, mean_final_dist: 0.1791 +/- 0.3534

distortion = 10 +/- 10 degrees
mean_init_acc: 0.7390, mean_final_acc: 0.7410
mean_init_dist: 0.2308 +/- 0.3780, mean_final_dist: 0.2383 +/- 0.4000

distortion = 15 +/- 10 degrees
mean_init_acc: 0.6880, mean_final_acc: 0.6890
mean_init_dist: 0.2901 +/- 0.4087, mean_final_dist: 0.2933 +/- 0.4320

distortion = 30 +/- 10 degrees
mean_init_acc: 0.4460, mean_final_acc: 0.4460
mean_init_dist: 0.5429 +/- 0.4625, mean_final_dist: 0.5461 +/- 0.4816
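As a possible follow-up analysis (hypothetical, not part of the run above), the stored axis-angle vectors allow measuring how far each optimized rotation ends up from its ground truth, e.g., via the geodesic angle on SO(3):

for distortion_mean, res in all_results.items():
    errs = []
    for gt_aa, opt_aa in zip(res['gt_axis_angles'], res['optimized_axis_angles']):
        # relative rotation between ground truth and estimate; the norm of
        # its rotation vector is the geodesic angle between them:
        r_rel = R.from_rotvec(gt_aa).inv() * R.from_rotvec(opt_aa)
        errs.append(np.degrees(np.linalg.norm(r_rel.as_rotvec())))
    print('distortion %2d deg: mean angular error %.2f +/- %.2f deg'
          % (distortion_mean, np.mean(errs), np.std(errs)))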