Python 3.8.11 (default, Aug 3 2021, 15:09:35)

Type "copyright", "credits" or "license" for more information.


IPython 7.22.0 -- An enhanced Interactive Python.


In [1]: runcell(0, '/home/min/a/tibrayev/RESEARCH/active_fable/fable_v2_falcon/FALcon_collect_samples_cub_imagenet.py')

/home/min/a/tibrayev/miniconda3/envs/torch_1.9_torchvision_10.0/lib/python3.8/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html

from .autonotebook import tqdm as notebook_tqdm

Traceback (most recent call last):


File "/home/min/a/tibrayev/RESEARCH/active_fable/fable_v2_falcon/FALcon_collect_samples_cub_imagenet.py", line 26, in <module>

from FALcon_config_test_as_WSOL import FALcon_config


ModuleNotFoundError: No module named 'FALcon_config_test_as_WSOL'
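The import fails because FALcon_config_test_as_WSOL is not on the interpreter's module search path (the rerun in In [2] below evidently succeeds once that is resolved). A minimal sketch of one common fix, assuming the config file sits in the same directory as the script:

    # Sketch (assumption): make the script's own directory importable before the failing import.
    import sys
    script_dir = '/home/min/a/tibrayev/RESEARCH/active_fable/fable_v2_falcon'
    if script_dir not in sys.path:
        sys.path.insert(0, script_dir)
    from FALcon_config_test_as_WSOL import FALcon_config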



In [2]: runcell(0, '/home/min/a/tibrayev/RESEARCH/active_fable/fable_v2_falcon/FALcon_collect_samples_cub_imagenet.py')


In [3]: runcell('Instantiate parameters, dataloaders, and model', '/home/min/a/tibrayev/RESEARCH/active_fable/fable_v2_falcon/FALcon_collect_samples_cub_imagenet.py')

conv1.weight

bn1.weight

bn1.bias

bn1.running_mean

bn1.running_var

bn1.num_batches_tracked

layer1.0.conv1.weight

layer1.0.bn1.weight

layer1.0.bn1.bias

layer1.0.bn1.running_mean

layer1.0.bn1.running_var

layer1.0.bn1.num_batches_tracked

layer1.0.conv2.weight

layer1.0.bn2.weight

layer1.0.bn2.bias

layer1.0.bn2.running_mean

layer1.0.bn2.running_var

layer1.0.bn2.num_batches_tracked

layer1.0.conv3.weight

layer1.0.bn3.weight

layer1.0.bn3.bias

layer1.0.bn3.running_mean

layer1.0.bn3.running_var

layer1.0.bn3.num_batches_tracked

layer1.0.downsample.0.weight

layer1.0.downsample.1.weight

layer1.0.downsample.1.bias

layer1.0.downsample.1.running_mean

layer1.0.downsample.1.running_var

layer1.0.downsample.1.num_batches_tracked

layer1.1.conv1.weight

layer1.1.bn1.weight

layer1.1.bn1.bias

layer1.1.bn1.running_mean

layer1.1.bn1.running_var

layer1.1.bn1.num_batches_tracked

layer1.1.conv2.weight

layer1.1.bn2.weight

layer1.1.bn2.bias

layer1.1.bn2.running_mean

layer1.1.bn2.running_var

layer1.1.bn2.num_batches_tracked

layer1.1.conv3.weight

layer1.1.bn3.weight

layer1.1.bn3.bias

layer1.1.bn3.running_mean

layer1.1.bn3.running_var

layer1.1.bn3.num_batches_tracked

layer1.2.conv1.weight

layer1.2.bn1.weight

layer1.2.bn1.bias

layer1.2.bn1.running_mean

layer1.2.bn1.running_var

layer1.2.bn1.num_batches_tracked

layer1.2.conv2.weight

layer1.2.bn2.weight

layer1.2.bn2.bias

layer1.2.bn2.running_mean

layer1.2.bn2.running_var

layer1.2.bn2.num_batches_tracked

layer1.2.conv3.weight

layer1.2.bn3.weight

layer1.2.bn3.bias

layer1.2.bn3.running_mean

layer1.2.bn3.running_var

layer1.2.bn3.num_batches_tracked

layer2.0.conv1.weight

layer2.0.bn1.weight

layer2.0.bn1.bias

layer2.0.bn1.running_mean

layer2.0.bn1.running_var

layer2.0.bn1.num_batches_tracked

layer2.0.conv2.weight

layer2.0.bn2.weight

layer2.0.bn2.bias

layer2.0.bn2.running_mean

layer2.0.bn2.running_var

layer2.0.bn2.num_batches_tracked

layer2.0.conv3.weight

layer2.0.bn3.weight

layer2.0.bn3.bias

layer2.0.bn3.running_mean

layer2.0.bn3.running_var

layer2.0.bn3.num_batches_tracked

layer2.0.downsample.0.weight

layer2.0.downsample.1.weight

layer2.0.downsample.1.bias

layer2.0.downsample.1.running_mean

layer2.0.downsample.1.running_var

layer2.0.downsample.1.num_batches_tracked

layer2.1.conv1.weight

layer2.1.bn1.weight

layer2.1.bn1.bias

layer2.1.bn1.running_mean

layer2.1.bn1.running_var

layer2.1.bn1.num_batches_tracked

layer2.1.conv2.weight

layer2.1.bn2.weight

layer2.1.bn2.bias

layer2.1.bn2.running_mean

layer2.1.bn2.running_var

layer2.1.bn2.num_batches_tracked

layer2.1.conv3.weight

layer2.1.bn3.weight

layer2.1.bn3.bias

layer2.1.bn3.running_mean

layer2.1.bn3.running_var

layer2.1.bn3.num_batches_tracked

layer2.2.conv1.weight

layer2.2.bn1.weight

layer2.2.bn1.bias

layer2.2.bn1.running_mean

layer2.2.bn1.running_var

layer2.2.bn1.num_batches_tracked

layer2.2.conv2.weight

layer2.2.bn2.weight

layer2.2.bn2.bias

layer2.2.bn2.running_mean

layer2.2.bn2.running_var

layer2.2.bn2.num_batches_tracked

layer2.2.conv3.weight

layer2.2.bn3.weight

layer2.2.bn3.bias

layer2.2.bn3.running_mean

layer2.2.bn3.running_var

layer2.2.bn3.num_batches_tracked

layer2.3.conv1.weight

layer2.3.bn1.weight

layer2.3.bn1.bias

layer2.3.bn1.running_mean

layer2.3.bn1.running_var

layer2.3.bn1.num_batches_tracked

layer2.3.conv2.weight

layer2.3.bn2.weight

layer2.3.bn2.bias

layer2.3.bn2.running_mean

layer2.3.bn2.running_var

layer2.3.bn2.num_batches_tracked

layer2.3.conv3.weight

layer2.3.bn3.weight

layer2.3.bn3.bias

layer2.3.bn3.running_mean

layer2.3.bn3.running_var

layer2.3.bn3.num_batches_tracked

layer3.0.conv1.weight

layer3.0.bn1.weight

layer3.0.bn1.bias

layer3.0.bn1.running_mean

layer3.0.bn1.running_var

layer3.0.bn1.num_batches_tracked

layer3.0.conv2.weight

layer3.0.bn2.weight

layer3.0.bn2.bias

layer3.0.bn2.running_mean

layer3.0.bn2.running_var

layer3.0.bn2.num_batches_tracked

layer3.0.conv3.weight

layer3.0.bn3.weight

layer3.0.bn3.bias

layer3.0.bn3.running_mean

layer3.0.bn3.running_var

layer3.0.bn3.num_batches_tracked

layer3.0.downsample.0.weight

layer3.0.downsample.1.weight

layer3.0.downsample.1.bias

layer3.0.downsample.1.running_mean

layer3.0.downsample.1.running_var

layer3.0.downsample.1.num_batches_tracked

layer3.1.conv1.weight

layer3.1.bn1.weight

layer3.1.bn1.bias

layer3.1.bn1.running_mean

layer3.1.bn1.running_var

layer3.1.bn1.num_batches_tracked

layer3.1.conv2.weight

layer3.1.bn2.weight

layer3.1.bn2.bias

layer3.1.bn2.running_mean

layer3.1.bn2.running_var

layer3.1.bn2.num_batches_tracked

layer3.1.conv3.weight

layer3.1.bn3.weight

layer3.1.bn3.bias

layer3.1.bn3.running_mean

layer3.1.bn3.running_var

layer3.1.bn3.num_batches_tracked

layer3.2.conv1.weight

layer3.2.bn1.weight

layer3.2.bn1.bias

layer3.2.bn1.running_mean

layer3.2.bn1.running_var

layer3.2.bn1.num_batches_tracked

layer3.2.conv2.weight

layer3.2.bn2.weight

layer3.2.bn2.bias

layer3.2.bn2.running_mean

layer3.2.bn2.running_var

layer3.2.bn2.num_batches_tracked

layer3.2.conv3.weight

layer3.2.bn3.weight

layer3.2.bn3.bias

layer3.2.bn3.running_mean

layer3.2.bn3.running_var

layer3.2.bn3.num_batches_tracked

layer3.3.conv1.weight

layer3.3.bn1.weight

layer3.3.bn1.bias

layer3.3.bn1.running_mean

layer3.3.bn1.running_var

layer3.3.bn1.num_batches_tracked

layer3.3.conv2.weight

layer3.3.bn2.weight

layer3.3.bn2.bias

layer3.3.bn2.running_mean

layer3.3.bn2.running_var

layer3.3.bn2.num_batches_tracked

layer3.3.conv3.weight

layer3.3.bn3.weight

layer3.3.bn3.bias

layer3.3.bn3.running_mean

layer3.3.bn3.running_var

layer3.3.bn3.num_batches_tracked

layer3.4.conv1.weight

layer3.4.bn1.weight

layer3.4.bn1.bias

layer3.4.bn1.running_mean

layer3.4.bn1.running_var

layer3.4.bn1.num_batches_tracked

layer3.4.conv2.weight

layer3.4.bn2.weight

layer3.4.bn2.bias

layer3.4.bn2.running_mean

layer3.4.bn2.running_var

layer3.4.bn2.num_batches_tracked

layer3.4.conv3.weight

layer3.4.bn3.weight

layer3.4.bn3.bias

layer3.4.bn3.running_mean

layer3.4.bn3.running_var

layer3.4.bn3.num_batches_tracked

layer3.5.conv1.weight

layer3.5.bn1.weight

layer3.5.bn1.bias

layer3.5.bn1.running_mean

layer3.5.bn1.running_var

layer3.5.bn1.num_batches_tracked

layer3.5.conv2.weight

layer3.5.bn2.weight

layer3.5.bn2.bias

layer3.5.bn2.running_mean

layer3.5.bn2.running_var

layer3.5.bn2.num_batches_tracked

layer3.5.conv3.weight

layer3.5.bn3.weight

layer3.5.bn3.bias

layer3.5.bn3.running_mean

layer3.5.bn3.running_var

layer3.5.bn3.num_batches_tracked

layer4.0.conv1.weight

layer4.0.bn1.weight

layer4.0.bn1.bias

layer4.0.bn1.running_mean

layer4.0.bn1.running_var

layer4.0.bn1.num_batches_tracked

layer4.0.conv2.weight

layer4.0.bn2.weight

layer4.0.bn2.bias

layer4.0.bn2.running_mean

layer4.0.bn2.running_var

layer4.0.bn2.num_batches_tracked

layer4.0.conv3.weight

layer4.0.bn3.weight

layer4.0.bn3.bias

layer4.0.bn3.running_mean

layer4.0.bn3.running_var

layer4.0.bn3.num_batches_tracked

layer4.0.downsample.0.weight

layer4.0.downsample.1.weight

layer4.0.downsample.1.bias

layer4.0.downsample.1.running_mean

layer4.0.downsample.1.running_var

layer4.0.downsample.1.num_batches_tracked

layer4.1.conv1.weight

layer4.1.bn1.weight

layer4.1.bn1.bias

layer4.1.bn1.running_mean

layer4.1.bn1.running_var

layer4.1.bn1.num_batches_tracked

layer4.1.conv2.weight

layer4.1.bn2.weight

layer4.1.bn2.bias

layer4.1.bn2.running_mean

layer4.1.bn2.running_var

layer4.1.bn2.num_batches_tracked

layer4.1.conv3.weight

layer4.1.bn3.weight

layer4.1.bn3.bias

layer4.1.bn3.running_mean

layer4.1.bn3.running_var

layer4.1.bn3.num_batches_tracked

layer4.2.conv1.weight

layer4.2.bn1.weight

layer4.2.bn1.bias

layer4.2.bn1.running_mean

layer4.2.bn1.running_var

layer4.2.bn1.num_batches_tracked

layer4.2.conv2.weight

layer4.2.bn2.weight

layer4.2.bn2.bias

layer4.2.bn2.running_mean

layer4.2.bn2.running_var

layer4.2.bn2.num_batches_tracked

layer4.2.conv3.weight

layer4.2.bn3.weight

layer4.2.bn3.bias

layer4.2.bn3.running_mean

layer4.2.bn3.running_var

layer4.2.bn3.num_batches_tracked

fc.weight

fc.bias
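The listing above is the set of state_dict keys of the classification backbone: convolution and linear weights and biases plus the BatchNorm buffers (running_mean, running_var, num_batches_tracked). It is the kind of output produced by a simple loop over the state dictionary; the script's exact print statement is not shown, but a sketch would be:

    # Sketch: print every parameter and buffer name of a torch.nn.Module.
    for name in model.state_dict().keys():
        print(name)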

Classification model:


ResNet(

(conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)

(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

(maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)

(layer1): Sequential(

(0): Bottleneck(

(conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

(downsample): Sequential(

(0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)

(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

)

)

(1): Bottleneck(

(conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

)

(2): Bottleneck(

(conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

)

)

(layer2): Sequential(

(0): Bottleneck(

(conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

(downsample): Sequential(

(0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)

(1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

)

)

(1): Bottleneck(

(conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

)

(2): Bottleneck(

(conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

)

(3): Bottleneck(

(conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

)

)

(layer3): Sequential(

(0): Bottleneck(

(conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

(downsample): Sequential(

(0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)

(1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

)

)

(1): Bottleneck(

(conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

)

(2): Bottleneck(

(conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

)

(3): Bottleneck(

(conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

)

(4): Bottleneck(

(conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

)

(5): Bottleneck(

(conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

)

)

(layer4): Sequential(

(0): Bottleneck(

(conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

(downsample): Sequential(

(0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)

(1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

)

)

(1): Bottleneck(

(conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

)

(2): Bottleneck(

(conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)

(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)

(bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)

(relu): ReLU(inplace=True)

)

)

(avgpool): AdaptiveAvgPool2d(output_size=(1, 1))

(fc): Linear(in_features=2048, out_features=200, bias=True)

)
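The classification model printed above is a standard torchvision ResNet-50 (bottleneck blocks in a 3-4-6-3 layout) with its final fully connected layer replaced by a 200-way head for the CUB-200-2011 classes. A minimal sketch of constructing an equivalent module, assuming torchvision is available:

    # Sketch (assumption): ResNet-50 backbone with the fc layer swapped for a 200-class CUB head.
    import torch.nn as nn
    from torchvision import models

    model = models.resnet50(pretrained=True)          # same architecture as the printout
    model.fc = nn.Linear(model.fc.in_features, 200)   # 2048 -> 200, matching fc above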

Selected VGG configuration (vgg11) was loaded from checkpoint: ./results/cub/wsol_method_PSOL/trained_on_trainval_split_evaluated_on_test_split/arch_vgg11_pretrained_init_normalization_none_seed_16/model.pth
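The message indicates that the localization network's weights were restored from the listed model.pth. A hedged sketch of such a restore, assuming the file stores a plain state_dict (the repository's actual checkpoint layout may differ):

    # Sketch (assumption): load the saved weights into an already-constructed model.
    import torch

    ckpt = torch.load('./results/cub/wsol_method_PSOL/trained_on_trainval_split_evaluated_on_test_split/arch_vgg11_pretrained_init_normalization_none_seed_16/model.pth', map_location='cpu')
    model.load_state_dict(ckpt)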


FALcon (localization) model:


VGG(

(features): Sequential(

(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))

(1): ReLU(inplace=True)

(2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)

(3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))

(4): ReLU(inplace=True)

(5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)

(6): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))

(7): ReLU(inplace=True)

(8): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))

(9): ReLU(inplace=True)

(10): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)

(11): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))

(12): ReLU(inplace=True)

(13): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))

(14): ReLU(inplace=True)

(15): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)

(16): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))

(17): ReLU(inplace=True)

(18): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))

(19): ReLU(inplace=True)

(20): AdaptiveAvgPool2d(output_size=(1, 1))

)

(fovea_control): Sequential(

(0): Linear(in_features=512, out_features=256, bias=True)

(1): ReLU(inplace=True)

(2): Linear(in_features=256, out_features=128, bias=True)

(3): ReLU(inplace=True)

(4): Linear(in_features=128, out_features=4, bias=True)

)

(saccade_control): Sequential(

(0): Linear(in_features=512, out_features=256, bias=True)

(1): ReLU(inplace=True)

(2): Linear(in_features=256, out_features=1, bias=True)

)

)
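The FALcon (localization) model is a truncated VGG-11 feature extractor pooled to a 512-dimensional vector, followed by two small MLP heads: fovea_control emits 4 values per glimpse and saccade_control emits 1. A hedged sketch of how those heads could be driven from the pooled features (the head names follow the printout; the forward logic here is illustrative, not the repository's exact code):

    # Illustrative forward pass (assumption): pool the VGG features, then run both heads.
    import torch

    def falcon_heads(model, x):
        feats = torch.flatten(model.features(x), 1)   # (N, 512) after the AdaptiveAvgPool2d
        fovea = model.fovea_control(feats)            # (N, 4): fovea-control outputs
        saccade = model.saccade_control(feats)        # (N, 1): saccade-control output
        return fovea, saccade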


In [4]: collected_samples = {}
   ...: path_to_samples = './results/cub/wsol_method_PSOL/trained_on_trainval_split_evaluated_on_test_split/arch_vgg11_pretrained_init_normalization_none_seed_16/'
   ...: partial_sample_collections = list(filter(lambda x: ('collected_sample_from' in x) and ('voc' not in x), os.listdir(path_to_samples)))
   ...: for partial in partial_sample_collections:
   ...:     ckpt = torch.load(os.path.join(path_to_samples, partial))
   ...:     collected_samples.update(ckpt)
   ...:
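In [4] merges the per-run sample files into one dictionary keyed by sample index. The same logic spelled out with its imports (the directory and filename filter follow the transcript; loading on CPU is an added assumption):

    import os
    import torch

    collected_samples = {}
    path_to_samples = './results/cub/wsol_method_PSOL/trained_on_trainval_split_evaluated_on_test_split/arch_vgg11_pretrained_init_normalization_none_seed_16/'
    for fname in os.listdir(path_to_samples):
        # keep the collected-sample files, skip any VOC ones
        if 'collected_sample_from' in fname and 'voc' not in fname:
            ckpt = torch.load(os.path.join(path_to_samples, fname), map_location='cpu')
            collected_samples.update(ckpt)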


In [5]: runcell(0, '/home/min/a/tibrayev/RESEARCH/active_fable/fable_v2_falcon/utils_custom_tvision_functions.py')


In [6]: def plotregions(list_of_regions, glimpse_size=None, color='g', **kwargs):
   ...:     if glimpse_size is None:
   ...:         for region in list_of_regions:
   ...:             xmin = region[0].item()
   ...:             ymin = region[1].item()
   ...:             width = region[2].item()
   ...:             height = region[3].item()
   ...:             # Add the patch to the Axes.
   ...:             # Note: the Rectangle docs say the first argument is the bottom-left corner, but the effective
   ...:             # anchor depends on the axis orientation: with the plt origin (0, 0) at the top left (as for
   ...:             # imshow), (x, y) is the top-left corner. Either way, (x, y) must be the bbox's (xmin, ymin).
   ...:             plt.gca().add_patch(Rectangle((xmin, ymin), width, height, linewidth=6, edgecolor=color, facecolor='none', **kwargs))
   ...:     elif glimpse_size is not None:
   ...:         if isinstance(glimpse_size, tuple):
   ...:             width, height = glimpse_size
   ...:         else:
   ...:             width = height = glimpse_size
   ...:         for region in list_of_regions:
   ...:             xmin = region[0].item()
   ...:             ymin = region[1].item()
   ...:             plt.gca().add_patch(Rectangle((xmin, ymin), width, height, linewidth=6, edgecolor=color, facecolor='none', **kwargs))
   ...:
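The comment about Rectangle's anchor matters when drawing over imshow output: the image origin is at the top left with y growing downward, so the (x, y) passed to Rectangle is effectively the box's top-left corner, i.e. (xmin, ymin). A self-contained sketch of that pattern (the dummy image is only for illustration):

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.patches import Rectangle

    img = np.zeros((100, 100, 3))     # dummy image
    plt.imshow(img)                   # origin at the top left, y increases downward
    plt.gca().add_patch(Rectangle((10, 20), 50, 30, linewidth=2, edgecolor='g', facecolor='none'))
    plt.show()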


In [7]: plt.close('all')

   ...: i = 3334

   ...: image, (target_class, target_bbox) = valid_loader.dataset[i]

   ...: target_class = target_class.unsqueeze(0)

   ...: image, target_class, target_bbox = image.unsqueeze(0).to(device), target_class.to(device), target_bbox.to(device)

   ...: imshow(image)

   ...: imshow(image), plotregions([collected_samples[i]["predictions"][p]["xywh_box"] for p in range(len(collected_samples[i]["predictions"]))], color='orange')

   ...: imshow(image), plotregions([collected_samples[i]["predictions"][p]["xywh_box"] for p in range(len(collected_samples[i]["predictions"]))], color='orange'), plotregions(collected_samples[i]["gt_bboxes"], color='r', linestyle='--')

   ...: print(len(collected_samples[i]["predictions"]))


1




In [8]: %matplotlib qt


In [9]: plt.close('all')

   ...: i = 3334

   ...: image, (target_class, target_bbox) = valid_loader.dataset[i]

   ...: target_class = target_class.unsqueeze(0)

   ...: image, target_class, target_bbox = image.unsqueeze(0).to(device), target_class.to(device), target_bbox.to(device)

   ...: imshow(image)

   ...: imshow(image), plotregions([collected_samples[i]["predictions"][p]["xywh_box"] for p in range(len(collected_samples[i]["predictions"]))], color='orange')

   ...: imshow(image), plotregions([collected_samples[i]["predictions"][p]["xywh_box"] for p in range(len(collected_samples[i]["predictions"]))], color='orange'), plotregions(collected_samples[i]["gt_bboxes"], color='r', linestyle='--')

   ...: print(len(collected_samples[i]["predictions"]))

1
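In [7] and In [9] repeat the same overlay: show the image, draw the predicted xywh boxes in orange, then add the ground-truth boxes dashed in red. A small wrapper for that pattern (a convenience sketch; imshow, plotregions, valid_loader, device, and collected_samples are the session's own objects):

    def show_sample_with_boxes(i):
        image, (target_class, target_bbox) = valid_loader.dataset[i]
        imshow(image.unsqueeze(0).to(device))
        preds = [collected_samples[i]["predictions"][p]["xywh_box"]
                 for p in range(len(collected_samples[i]["predictions"]))]
        plotregions(preds, color='orange')                                         # predicted boxes
        plotregions(collected_samples[i]["gt_bboxes"], color='r', linestyle='--')  # ground-truth boxes
        print(len(preds))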



In [10]: number_of_predictions = {}
    ...: for sample_id, sample in collected_samples.items():
    ...:     predictions = len(sample["predictions"])
    ...:     if predictions in number_of_predictions:
    ...:         number_of_predictions[predictions] += 1
    ...:     else:
    ...:         number_of_predictions[predictions] = 1
    ...:
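In [10] builds a histogram of how many boxes FALcon predicted per image. An equivalent one-liner with collections.Counter (a sketch, not what was typed in the session):

    from collections import Counter

    number_of_predictions = Counter(len(sample["predictions"]) for sample in collected_samples.values())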


In [11]: number_of_predictions

Out[11]: {1: 5494, 2: 279, 3: 19, 5: 1, 4: 1}

