Python 3.8.11 (default, Aug 3 2021, 15:09:35)
Type "copyright", "credits" or "license" for more information.

IPython 7.22.0 -- An enhanced Interactive Python.


In [1]: runcell(0, '/home/min/a/tibrayev/RESEARCH/active_fable/fable_v2_falcon/FALcon_collect_samples_cub_imagenet.py')
/home/min/a/tibrayev/miniconda3/envs/torch_1.9_torchvision_10.0/lib/python3.8/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
  from .autonotebook import tqdm as notebook_tqdm


In [2]: config_3 = FALcon_config


In [3]: config_3_copy = {k: v for k, v in config_3.__dict__.items() if '__' not in k}


In [4]: config_3_copy

Out[4]:
{'seed': 16,
 'dataset': 'imagenet',
 'dataset_dir': '/home/nano01/a/tibrayev/imagenet/annotated_imagenet2012',
 'num_classes': 1000,
 'in_num_channels': 3,
 'full_res_img_size': (256, 256),
 'gt_bbox_dir': '/home/nano01/a/tibrayev/imagenet/annotated_imagenet2012/anno_val',
 'wsol_method': 'PSOL',
 'pseudo_bbox_dir': '../PSOL/results/ImageNet_train_set/predicted_bounding_boxes/',
 'loader_random_seed': 1,
 'valid_split_size': 0.1,
 'cls_model_name': 'resnet50',
 'cls_pretrained': True,
 'cls_ckpt_dir': None,
 'save_dir': './results/imagenet/wsol_method_PSOL/trained_on_train_split/arch_vgg16_pretrained_init_normalization_none_seed_16/',
 'model_name': 'vgg16',
 'initialize': 'resume',
 'ckpt_dir': './results/imagenet/wsol_method_PSOL/trained_on_train_split/arch_vgg16_pretrained_init_normalization_none_seed_16/model.pth',
 'loader_type': 'test',
 'batch_size_eval': 50,
 'init_factual': 'pretrained',
 'downsampling': 'M',
 'fc1': 256,
 'fc2': 128,
 'dropout': 0.5,
 'norm': 'none',
 'init_weights': True,
 'adaptive_avg_pool_out': (1, 1),
 'saccade_fc1': 256,
 'saccade_dropout': False,
 'num_glimpses': 16,
 'fovea_control_neurons': 4,
 'glimpse_size_grid': (20, 20),
 'glimpse_size_init': (20, 20),
 'glimpse_size_fixed': (96, 96),
 'glimpse_size_step': (20, 20),
 'glimpse_change_th': 0.5,
 'iou_th': 0.5,
 'ratio_wrong_init_glimpses': 0.5,
 'switch_location_th': 0.2,
 'objectness_based_nms_th': 0.5,
 'confidence_based_nms_th': 0.5}
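
(Note) The '__' filter in In [3] strips dunder entries such as __module__ and __doc__ from the class namespace, which suggests FALcon_config is a plain class holding its settings as class-level attributes. A minimal sketch under that assumption, using only a few of the fields from Out[4] for illustration:

    class FALcon_config:
        # configuration stored as class attributes (illustrative subset of Out[4])
        seed = 16
        dataset = 'imagenet'
        num_glimpses = 16
        glimpse_size_grid = (20, 20)
        iou_th = 0.5

    # dropping dunder keys from the class namespace reproduces a dict like Out[4]
    config_copy = {k: v for k, v in FALcon_config.__dict__.items() if '__' not in k}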


In [5]: from voclike_imagenet_evaluator import do_voc_evaluation
   ...: collected_samples = {}
   ...: path_to_samples = './results/imagenet/wsol_method_PSOL/trained_on_train_split/arch_vgg16_pretrained_init_normalization_none_seed_16/no_ten_crop/'
   ...: partial_sample_collections = list(filter((lambda x: ('collected_sample_from' in x)), os.listdir(path_to_samples)))
   ...: for partial in partial_sample_collections:
   ...:     ckpt = torch.load(os.path.join(path_to_samples, partial))
   ...:     collected_samples.update(ckpt)
   ...:
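
(Note) os, torch, FALcon_config, and region_iou are never imported in this session; they are presumably left in scope by runcell(0, ...) on the collection script. Judging from the fields accessed in the loops below, each entry of collected_samples is assumed to look roughly like the sketch that follows; the values and the (x, y, w, h) box layout are illustrative guesses, not taken from the actual checkpoints:

    import torch

    example_sample = {
        "gt_labels": torch.tensor([65]),                          # ground-truth class indices
        "gt_bboxes": torch.tensor([[30.0, 40.0, 150.0, 130.0]]),  # one row per annotated box
        "predictions": [                                          # ranked list of model outputs
            {"prediction_label": 65,
             "final_glimpse_loc_and_dim": torch.tensor([[28.0, 42.0, 148.0, 128.0]])},
        ],
    }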


In [6]: ## For WSOL results: Top 1 predictions


In [7]: acc_correct_class = 0
   ...: acc_localization = 0
   ...: acc_class_localized = 0
   ...: total_samples = 0
   ...: for sample_id, sample_stats in collected_samples.items():
   ...:     target_bbox = sample_stats["gt_bboxes"]
   ...:     target_class = sample_stats["gt_labels"]
   ...:
   ...:     # collect WSOL results statistics
   ...:     total_samples += 1
   ...:
   ...:     is_correct_label = []
   ...:     is_correct_box = []
   ...:     cnt_predictions = 0
   ...:     for prediction in sample_stats["predictions"]:
   ...:         for t_class, t_bbox in zip(target_class, target_bbox):
   ...:             if prediction["prediction_label"] == t_class:
   ...:                 is_correct_label.append(True)
   ...:             else:
   ...:                 is_correct_label.append(False)
   ...:             iou = region_iou(prediction["final_glimpse_loc_and_dim"], t_bbox.unsqueeze(0))
   ...:             if (iou >= 0.5).item():
   ...:                 is_correct_box.append(True)
   ...:             else:
   ...:                 is_correct_box.append(False)
   ...:         cnt_predictions += 1
   ...:         if cnt_predictions == 1: # limit the number of predictions per image
   ...:             break
   ...:
   ...:     is_correct_label = torch.tensor(is_correct_label)
   ...:     is_correct_box = torch.tensor(is_correct_box)
   ...:     acc_correct_class += torch.any(is_correct_label).sum().item()
   ...:     acc_localization += torch.any(is_correct_box).sum().item()
   ...:     acc_class_localized += torch.any(torch.logical_and(is_correct_label, is_correct_box)).sum().item()
   ...: print("TEST (WSOL) STATS: Top-1 Cls: {:.4f} [{}/{}] | GT Loc: {:.4f} [{}/{}] | Top-1 Loc: {:.4f} [{}/{}]\n".format(
   ...:     (100.*acc_correct_class/total_samples), acc_correct_class, total_samples,
   ...:     (100.*acc_localization/total_samples), acc_localization, total_samples,
   ...:     (100.*acc_class_localized/total_samples), acc_class_localized, total_samples))

TEST (WSOL) STATS: Top-1 Cls: 72.9860 [36493/50000] | GT Loc: 62.4500 [31225/50000] | Top-1 Loc: 49.9320 [24966/50000]
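
(Note) In this printout, Top-1 Cls counts images where a retained prediction's label matches a ground-truth label, GT Loc counts images where a retained box reaches IoU >= 0.5 with a ground-truth box, and Top-1 Loc counts images where the same prediction/ground-truth pairing satisfies both at once. region_iou itself comes from the FALcon codebase; the sketch below only illustrates the assumed semantics, and the (x, y, width, height) layout of final_glimpse_loc_and_dim is an assumption rather than something visible in this session:

    import torch

    def region_iou_sketch(boxes_a, boxes_b):
        # Pairwise IoU for two (N, 4) tensors assumed to hold (x, y, w, h) boxes.
        ax1, ay1 = boxes_a[:, 0], boxes_a[:, 1]
        ax2, ay2 = ax1 + boxes_a[:, 2], ay1 + boxes_a[:, 3]
        bx1, by1 = boxes_b[:, 0], boxes_b[:, 1]
        bx2, by2 = bx1 + boxes_b[:, 2], by1 + boxes_b[:, 3]
        inter_w = (torch.min(ax2, bx2) - torch.max(ax1, bx1)).clamp(min=0)
        inter_h = (torch.min(ay2, by2) - torch.max(ay1, by1)).clamp(min=0)
        intersection = inter_w * inter_h
        union = boxes_a[:, 2] * boxes_a[:, 3] + boxes_b[:, 2] * boxes_b[:, 3] - intersection
        return intersection / union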



In [8]: ## For WSOL results: Top 3 predictions


In [9]: acc_correct_class = 0
   ...: acc_localization = 0
   ...: acc_class_localized = 0
   ...: total_samples = 0
   ...: for sample_id, sample_stats in collected_samples.items():
   ...:     target_bbox = sample_stats["gt_bboxes"]
   ...:     target_class = sample_stats["gt_labels"]
   ...:
   ...:     # collect WSOL results statistics
   ...:     total_samples += 1
   ...:
   ...:     is_correct_label = []
   ...:     is_correct_box = []
   ...:     cnt_predictions = 0
   ...:     for prediction in sample_stats["predictions"]:
   ...:         for t_class, t_bbox in zip(target_class, target_bbox):
   ...:             if prediction["prediction_label"] == t_class:
   ...:                 is_correct_label.append(True)
   ...:             else:
   ...:                 is_correct_label.append(False)
   ...:             iou = region_iou(prediction["final_glimpse_loc_and_dim"], t_bbox.unsqueeze(0))
   ...:             if (iou >= 0.5).item():
   ...:                 is_correct_box.append(True)
   ...:             else:
   ...:                 is_correct_box.append(False)
   ...:         cnt_predictions += 1
   ...:         if cnt_predictions == 3: # limit the number of predictions per image
   ...:             break
   ...:
   ...:     is_correct_label = torch.tensor(is_correct_label)
   ...:     is_correct_box = torch.tensor(is_correct_box)
   ...:     acc_correct_class += torch.any(is_correct_label).sum().item()
   ...:     acc_localization += torch.any(is_correct_box).sum().item()
   ...:     acc_class_localized += torch.any(torch.logical_and(is_correct_label, is_correct_box)).sum().item()
   ...: print("TEST (WSOL) STATS: Top-1 Cls: {:.4f} [{}/{}] | GT Loc: {:.4f} [{}/{}] | Top-1 Loc: {:.4f} [{}/{}]\n".format(
   ...:     (100.*acc_correct_class/total_samples), acc_correct_class, total_samples,
   ...:     (100.*acc_localization/total_samples), acc_localization, total_samples,
   ...:     (100.*acc_class_localized/total_samples), acc_class_localized, total_samples))

TEST (WSOL) STATS: Top-1 Cls: 74.7960 [37398/50000] | GT Loc: 67.3800 [33690/50000] | Top-1 Loc: 53.3120 [26656/50000]



In [10]: ## For WSOL results: Top 5 predictions


In [11]: acc_correct_class = 0
    ...: acc_localization = 0
    ...: acc_class_localized = 0
    ...: total_samples = 0
    ...: for sample_id, sample_stats in collected_samples.items():
    ...:     target_bbox = sample_stats["gt_bboxes"]
    ...:     target_class = sample_stats["gt_labels"]
    ...:
    ...:     # collect WSOL results statistics
    ...:     total_samples += 1
    ...:
    ...:     is_correct_label = []
    ...:     is_correct_box = []
    ...:     cnt_predictions = 0
    ...:     for prediction in sample_stats["predictions"]:
    ...:         for t_class, t_bbox in zip(target_class, target_bbox):
    ...:             if prediction["prediction_label"] == t_class:
    ...:                 is_correct_label.append(True)
    ...:             else:
    ...:                 is_correct_label.append(False)
    ...:             iou = region_iou(prediction["final_glimpse_loc_and_dim"], t_bbox.unsqueeze(0))
    ...:             if (iou >= 0.5).item():
    ...:                 is_correct_box.append(True)
    ...:             else:
    ...:                 is_correct_box.append(False)
    ...:         cnt_predictions += 1
    ...:         if cnt_predictions == 5: # limit the number of predictions per image
    ...:             break
    ...:
    ...:     is_correct_label = torch.tensor(is_correct_label)
    ...:     is_correct_box = torch.tensor(is_correct_box)
    ...:     acc_correct_class += torch.any(is_correct_label).sum().item()
    ...:     acc_localization += torch.any(is_correct_box).sum().item()
    ...:     acc_class_localized += torch.any(torch.logical_and(is_correct_label, is_correct_box)).sum().item()
    ...: print("TEST (WSOL) STATS: Top-1 Cls: {:.4f} [{}/{}] | GT Loc: {:.4f} [{}/{}] | Top-1 Loc: {:.4f} [{}/{}]\n".format(
    ...:     (100.*acc_correct_class/total_samples), acc_correct_class, total_samples,
    ...:     (100.*acc_localization/total_samples), acc_localization, total_samples,
    ...:     (100.*acc_class_localized/total_samples), acc_class_localized, total_samples))

TEST (WSOL) STATS: Top-1 Cls: 74.8580 [37429/50000] | GT Loc: 67.5120 [33756/50000] | Top-1 Loc: 53.3960 [26698/50000]



In [12]: ## For WSOL results: Unlimited predictions


In [13]: acc_correct_class = 0
    ...: acc_localization = 0
    ...: acc_class_localized = 0
    ...: total_samples = 0
    ...: for sample_id, sample_stats in collected_samples.items():
    ...:     target_bbox = sample_stats["gt_bboxes"]
    ...:     target_class = sample_stats["gt_labels"]
    ...:
    ...:     # collect WSOL results statistics
    ...:     total_samples += 1
    ...:
    ...:     is_correct_label = []
    ...:     is_correct_box = []
    ...:     cnt_predictions = 0
    ...:     for prediction in sample_stats["predictions"]:
    ...:         for t_class, t_bbox in zip(target_class, target_bbox):
    ...:             if prediction["prediction_label"] == t_class:
    ...:                 is_correct_label.append(True)
    ...:             else:
    ...:                 is_correct_label.append(False)
    ...:             iou = region_iou(prediction["final_glimpse_loc_and_dim"], t_bbox.unsqueeze(0))
    ...:             if (iou >= 0.5).item():
    ...:                 is_correct_box.append(True)
    ...:             else:
    ...:                 is_correct_box.append(False)
    ...:         cnt_predictions += 1
    ...:         #if cnt_predictions == 5: # limit the number of predictions per image
    ...:         #    break
    ...:
    ...:     is_correct_label = torch.tensor(is_correct_label)
    ...:     is_correct_box = torch.tensor(is_correct_box)
    ...:     acc_correct_class += torch.any(is_correct_label).sum().item()
    ...:     acc_localization += torch.any(is_correct_box).sum().item()
    ...:     acc_class_localized += torch.any(torch.logical_and(is_correct_label, is_correct_box)).sum().item()
    ...: print("TEST (WSOL) STATS: Top-1 Cls: {:.4f} [{}/{}] | GT Loc: {:.4f} [{}/{}] | Top-1 Loc: {:.4f} [{}/{}]\n".format(
    ...:     (100.*acc_correct_class/total_samples), acc_correct_class, total_samples,
    ...:     (100.*acc_localization/total_samples), acc_localization, total_samples,
    ...:     (100.*acc_class_localized/total_samples), acc_class_localized, total_samples))

TEST (WSOL) STATS: Top-1 Cls: 74.8620 [37431/50000] | GT Loc: 67.5220 [33761/50000] | Top-1 Loc: 53.4080 [26704/50000]
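
(Note) The four runs in In [7], In [9], In [11], and In [13] differ only in the per-image cap on retained predictions (1, 3, 5, or uncapped). A compact restatement of the same loop with the cap as a parameter is sketched below; the helper name and signature are illustrative, not part of the FALcon code:

    import torch

    def evaluate_wsol(collected_samples, region_iou, max_predictions=None, iou_th=0.5):
        # Returns (Cls, GT Loc, Cls+Loc) accuracies as fractions over all samples.
        acc_cls, acc_loc, acc_cls_loc, total = 0, 0, 0, 0
        for sample_stats in collected_samples.values():
            total += 1
            correct_label, correct_box = [], []
            predictions = sample_stats["predictions"]
            if max_predictions is not None:
                predictions = predictions[:max_predictions]  # keep the K highest-ranked predictions
            for prediction in predictions:
                for t_class, t_bbox in zip(sample_stats["gt_labels"], sample_stats["gt_bboxes"]):
                    correct_label.append(bool(prediction["prediction_label"] == t_class))
                    iou = region_iou(prediction["final_glimpse_loc_and_dim"], t_bbox.unsqueeze(0))
                    correct_box.append((iou >= iou_th).item())
            correct_label = torch.tensor(correct_label)
            correct_box = torch.tensor(correct_box)
            acc_cls += torch.any(correct_label).item()
            acc_loc += torch.any(correct_box).item()
            acc_cls_loc += torch.any(torch.logical_and(correct_label, correct_box)).item()
        return acc_cls / total, acc_loc / total, acc_cls_loc / total

Under this sketch, evaluate_wsol(collected_samples, region_iou, max_predictions=1) corresponds to the In [7] run and max_predictions=None to the In [13] run.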



In [14]: