#| default_exp model.optimization.nn.tsc.vittsc.face_detection_evaluation_mask
%load_ext autoreload
%autoreload 2
# declare a list of tasks whose products you want to use as inputs
upstream = ['tabular_to_timeseries_face_detection', 'model_training_face_detection']
# Parameters
upstream = {"model_training_face_detection": {"nb": "/home/ubuntu/vitmtsc_nbdev/output/401_model.optimization.nn.tsc.vittsc.face_detection_training_mask_tune.html", "FaceDetection_MODEL_TUNE_OUTPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/ray_results", "FaceDetection_MODEL_TRAINING_OUTPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/experiments_result", "FaceDetection_MODEL_TRAINING_CHECKPOINT_OUTPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/experiments_result/checkpoint", "FaceDetection_BEST_MODEL": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/experiments_result/best_model.ckpt", "FaceDetection_BEST_MODEL_CONFIG": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/experiments_result/best_model_config.json"}, "tabular_to_timeseries_face_detection": {"nb": "/home/ubuntu/vitmtsc_nbdev/output/301_feature_preprocessing.face_detection.tabular_to_timeseries.html", "FaceDetection_TRAIN_MODEL_INPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/target_encoding-nn/train", "FaceDetection_VALID_MODEL_INPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/target_encoding-nn/valid", "FaceDetection_TEST_MODEL_INPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/target_encoding-nn/test"}}
product = {"nb": "/home/ubuntu/vitmtsc_nbdev/output/501_model.optimization.nn.tsc.vittsc.face_detection_evaluation_mask.html", "FaceDetection_MODEL_VALID_EVAL_OUTPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/experiments_result/evaluation/valid", "FaceDetection_MODEL_TEST_EVAL_OUTPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/experiments_result/evaluation/test"}
#| hide
from nbdev.showdoc import *
#| export
import sys
import pathlib as p
def is_running_from_ipython():
    from IPython import get_ipython
    return get_ipython() is not None

if not is_running_from_ipython() and __package__ is None:
    DIR = p.Path(__file__).resolve().parent
    sys.path.insert(0, str(DIR.parent))
    __package__ = DIR.name
#| export
from vitmtsc.model.optimization.nn.tsc.vittsc.face_detection_training_mask_tune import *
class_weight: [0.49978787 0.50021231]
#| export
upstream = {
    "tabular_to_timeseries_face_detection": {
        "nb": "/home/ubuntu/vitmtsc_nbdev/output/301_feature_preprocessing.face_detection.tabular_to_timeseries.html",
        "FaceDetection_TRAIN_MODEL_INPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/target_encoding-nn/train",
        "FaceDetection_VALID_MODEL_INPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/target_encoding-nn/valid",
        "FaceDetection_TEST_MODEL_INPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/target_encoding-nn/test",
    },
    "model_training_face_detection": {
        "nb": "/home/ubuntu/vitmtsc_nbdev/output/401_model.optimization.nn.tsc.vittsc.face_detection_training_mask_tune.html",
        "FaceDetection_MODEL_TUNE_OUTPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/ray_results",
        "FaceDetection_MODEL_TRAINING_OUTPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/experiments_result",
        "FaceDetection_MODEL_TRAINING_CHECKPOINT_OUTPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/experiments_result/checkpoint",
        "FaceDetection_BEST_MODEL": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/experiments_result/best_model.ckpt",
        "FaceDetection_BEST_MODEL_CONFIG": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/experiments_result/best_model_config.json",
    },
}
product = {
    "nb": "/home/ubuntu/vitmtsc_nbdev/output/501_model.optimization.nn.tsc.vittsc.face_detection_evaluation_mask.html",
    "FaceDetection_MODEL_VALID_EVAL_OUTPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/experiments_result/evaluation/valid",
    "FaceDetection_MODEL_TEST_EVAL_OUTPUT": "/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/experiments_result/evaluation/test",
}
#| export
import json
def get_best_model_config():
    with open(upstream['model_training_face_detection']['FaceDetection_BEST_MODEL_CONFIG'], 'r') as json_file:
        return json.load(json_file)
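To inspect the tuned hyperparameters, the config can simply be pretty-printed; the exact keys depend on the search space defined in the training notebook:
config = get_best_model_config()
print(json.dumps(config, indent=2))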
#| export
import pandas as pd
import os
import torch
import math
import glob
import pytorch_lightning as pl
from torch.nn import functional as F
import matplotlib.pyplot as plt
import scikitplot as skplt
from pytorch_lightning import LightningModule
from pytorch_lightning import Trainer
from petastorm import make_batch_reader
from petastorm.pytorch import DataLoader
Load Model
Model Evaluation: evaluate the model on the validation and test datasets using PR-AUC
#| export
DATASET_NAME = 'FaceDetection'
VALID_DATA_DIR = f"file://{upstream['tabular_to_timeseries_face_detection']['FaceDetection_VALID_MODEL_INPUT']}"
TEST_DATA_DIR = f"file://{upstream['tabular_to_timeseries_face_detection']['FaceDetection_TEST_MODEL_INPUT']}"
VALID_EVAL_OUTPUT_DIR = product['FaceDetection_MODEL_VALID_EVAL_OUTPUT']
TEST_EVAL_OUTPUT_DIR = product['FaceDetection_MODEL_TEST_EVAL_OUTPUT']
BEST_MODEL_CHECKPOINT = upstream['model_training_face_detection']['FaceDetection_BEST_MODEL']
NUM_WORKERS = 1
SHARD_COUNT = 1
BATCH_SIZE = 64
TOTAL_VALID_BATCHES = math.ceil(get_valid_dataset_size()/BATCH_SIZE)
TOTAL_TEST_BATCHES = math.ceil(get_test_dataset_size()/BATCH_SIZE)
BEST_MODEL_CHECKPOINT, TOTAL_VALID_BATCHES, TOTAL_TEST_BATCHES, VALID_DATA_DIR, TEST_DATA_DIR, VALID_EVAL_OUTPUT_DIR, TEST_EVAL_OUTPUT_DIR
('/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/experiments_result/best_model.ckpt', 19, 56, 'file:///home/ubuntu/vitmtsc_nbdev/output/FaceDetection/target_encoding-nn/valid', 'file:///home/ubuntu/vitmtsc_nbdev/output/FaceDetection/target_encoding-nn/test', '/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/experiments_result/evaluation/valid', '/home/ubuntu/vitmtsc_nbdev/output/FaceDetection/experiments_result/evaluation/test')
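As a sanity check, these batch counts agree with the dataset sizes seen later in this notebook (1,178 validation and 3,524 test cases):
# ceil(1178 / 64) == 19 and ceil(3524 / 64) == 56, matching the tuple above
assert math.ceil(1178 / BATCH_SIZE) == TOTAL_VALID_BATCHES
assert math.ceil(3524 / BATCH_SIZE) == TOTAL_TEST_BATCHES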
!mkdir -p $VALID_EVAL_OUTPUT_DIR
!mkdir -p $TEST_EVAL_OUTPUT_DIR
#| export
class VitMTSCClassificationPredictionTask(LightningModule):
    """Run the trained model over a petastorm dataset and write per-case
    predictions (class probabilities, predicted label, target) to CSV."""
    def __init__(self,
                 model,
                 output_pred_dir,
                 input_data_dir,
                 batch_size=BATCH_SIZE,
                 num_workers=NUM_WORKERS,
                 shard_count=SHARD_COUNT):
        super().__init__()
        pl.seed_everything(42, workers=True)
        self.model = model
        # accumulators filled batch-by-batch in test_step
        self.case_id = []
        self.probability_0 = []
        self.probability_1 = []
        self.prediction = []
        self.target = []
        self.output_pred_dir = output_pred_dir
        self.input_data_dir = input_data_dir
        self.prediction_files = input_data_dir
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.shard_count = shard_count

    def test_step(self, batch, batch_idx):
        x, y, case_id_1, mask = batch
        y_hat = self.model(x, mask)
        probs = F.softmax(y_hat, dim=1)
        self.case_id.extend(case_id_1.to('cpu').numpy())
        self.probability_0.extend(probs[:, 0].to('cpu').numpy())
        self.probability_1.extend(probs[:, 1].to('cpu').numpy())
        self.prediction.extend(torch.max(y_hat.data, 1)[1].to('cpu').numpy())
        self.target.extend(y.to('cpu').numpy())

    def test_dataloader(self):
        print('test_dataloader: local rank :', int(os.environ['LOCAL_RANK']), 'shard count: ', self.shard_count)
        self.test_ds = make_batch_reader(self.prediction_files, workers_count=self.num_workers,
                                         cur_shard=int(os.environ['LOCAL_RANK']),
                                         shard_count=self.shard_count, num_epochs=2)
        return DataLoader(self.test_ds, batch_size=self.batch_size, collate_fn=petastorm_collate_fn)

    def test_epoch_end(self, outputs):
        print('Consolidating predictions on GPU:', os.environ['LOCAL_RANK'])
        df_text_predictions = pd.DataFrame({'case_id': self.case_id,
                                            'probability_0': self.probability_0,
                                            'probability_1': self.probability_1,
                                            'prediction': self.prediction,
                                            'target': self.target})
        print('Writing predictions on GPU:', os.environ['LOCAL_RANK'])
        df_text_predictions.to_csv(self.output_pred_dir + "/" + os.environ['LOCAL_RANK'] + '_predictions.csv', index=False)
        print('Finished Writing predictions on GPU:', os.environ['LOCAL_RANK'])
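The task depends on petastorm_collate_fn, which comes in via the star import from the training module. A minimal sketch of what such a collate function might look like, assuming each petastorm batch carries feature, label, case-id, and mask columns (the names below are hypothetical; the real implementation lives in the training notebook):
def petastorm_collate_fn_sketch(batch):
    # batch is a namedtuple of column arrays produced by make_batch_reader;
    # the column names here are illustrative, not the pipeline's actual schema
    x = torch.as_tensor(batch.features, dtype=torch.float32)
    y = torch.as_tensor(batch.target, dtype=torch.long)
    case_id = torch.as_tensor(batch.case_id, dtype=torch.long)
    mask = torch.as_tensor(batch.mask, dtype=torch.bool)
    return x, y, case_id, mask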
#| export
def get_model_for_prediction(BEST_MODEL_CHECKPOINT, config, output_pred_dir, input_data_dir, shard_count=SHARD_COUNT):
    # load the best checkpoint and wrap it in the prediction task
    pl.seed_everything(42, workers=True)
    model = VitTimeSeriesTransformer.load_from_checkpoint(BEST_MODEL_CHECKPOINT, config=config)
    model.eval()
    return VitMTSCClassificationPredictionTask(model=model, shard_count=shard_count,
                                               output_pred_dir=output_pred_dir, input_data_dir=input_data_dir)
#| export
def write_prediction_for_valid_dataset(BEST_MODEL_CHECKPOINT,
                                       config,
                                       shard_count,
                                       output_pred_dir=VALID_EVAL_OUTPUT_DIR,
                                       input_data_dir=VALID_DATA_DIR):
    pl.seed_everything(42, workers=True)
    model = get_model_for_prediction(BEST_MODEL_CHECKPOINT=BEST_MODEL_CHECKPOINT,
                                     config=config,
                                     shard_count=shard_count,
                                     output_pred_dir=output_pred_dir,
                                     input_data_dir=input_data_dir)
    trainer = Trainer(gpus=[0],
                      accelerator='dp',
                      progress_bar_refresh_rate=1,
                      limit_test_batches=TOTAL_VALID_BATCHES)
    trainer.test(model)

def write_prediction_for_test_dataset(BEST_MODEL_CHECKPOINT,
                                      config,
                                      shard_count,
                                      output_pred_dir=TEST_EVAL_OUTPUT_DIR,
                                      input_data_dir=TEST_DATA_DIR):
    pl.seed_everything(42, workers=True)
    model = get_model_for_prediction(BEST_MODEL_CHECKPOINT=BEST_MODEL_CHECKPOINT,
                                     config=config,
                                     shard_count=shard_count,
                                     output_pred_dir=output_pred_dir,
                                     input_data_dir=input_data_dir)
    trainer = Trainer(gpus=[0],
                      accelerator='dp',
                      progress_bar_refresh_rate=1,
                      limit_test_batches=TOTAL_TEST_BATCHES)
    trainer.test(model)
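The two writers differ only in their default directories and batch limits; if desired they could be collapsed into one helper (an illustrative sketch, not exported by this notebook):
def write_predictions(BEST_MODEL_CHECKPOINT, config, shard_count,
                      output_pred_dir, input_data_dir, total_batches):
    # same flow as the two functions above, parameterized per dataset split
    pl.seed_everything(42, workers=True)
    model = get_model_for_prediction(BEST_MODEL_CHECKPOINT=BEST_MODEL_CHECKPOINT,
                                     config=config,
                                     shard_count=shard_count,
                                     output_pred_dir=output_pred_dir,
                                     input_data_dir=input_data_dir)
    trainer = Trainer(gpus=[0], accelerator='dp', progress_bar_refresh_rate=1,
                      limit_test_batches=total_batches)
    trainer.test(model)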
%env LOCAL_RANK=0
env: LOCAL_RANK=0
#| export
if __name__ == "__main__":
    print('Processing valid dataset...\n')
    write_prediction_for_valid_dataset(BEST_MODEL_CHECKPOINT=BEST_MODEL_CHECKPOINT,
                                       config=get_best_model_config(),
                                       shard_count=SHARD_COUNT)
    print('Finished Processing valid dataset!!!\n')
    print('Processing test dataset...\n')
    write_prediction_for_test_dataset(BEST_MODEL_CHECKPOINT=BEST_MODEL_CHECKPOINT,
                                      config=get_best_model_config(),
                                      shard_count=SHARD_COUNT)
    print('Finished Processing test dataset!!!\n')
Global seed set to 42
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
IPU available: False, using: 0 IPUs
HPU available: False, using: 0 HPUs
Missing logger folder: /home/ubuntu/vitmtsc_nbdev/lightning_logs
Processing valid dataset...
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1,2,3]
test_dataloader: local rank : 0 shard count: 1
Testing: 0it [00:00, ?it/s]
Global seed set to 42
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
IPU available: False, using: 0 IPUs
HPU available: False, using: 0 HPUs
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1,2,3]
Consolidating predictions on GPU: 0
Writing predictions on GPU: 0
Finished Writing predictions on GPU: 0
Finished Processing valid dataset!!!
Processing test dataset...
test_dataloader: local rank : 0 shard count:  1
Testing: 0it [00:00, ?it/s]
Consolidating predictions on GPU: 0
Writing predictions on GPU: 0
Finished Writing predictions on GPU: 0
Finished Processing test dataset!!!
import scikitplot as skplt
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
# gather the per-shard prediction CSVs; the petastorm reader runs num_epochs=2,
# so rows can repeat and duplicates must be dropped
valid_gdf = pd.concat(map(pd.read_csv, glob.glob(f'{VALID_EVAL_OUTPUT_DIR}/*.csv')))
valid_gdf['target'] = valid_gdf['target'].astype('int64')
valid_gdf['case_id'] = valid_gdf['case_id'].astype('int64')
valid_gdf = valid_gdf.drop_duplicates()
valid_gdf
|      | case_id | probability_0 | probability_1 | prediction | target |
|------|---------|---------------|---------------|------------|--------|
| 0    | 3784    | 0.774391      | 0.225609      | 0          | 0      |
| 1    | 408     | 0.075318      | 0.924682      | 1          | 1      |
| 2    | 5582    | 0.765467      | 0.234533      | 0          | 0      |
| 3    | 465     | 0.515246      | 0.484754      | 0          | 0      |
| 4    | 4169    | 0.949881      | 0.050119      | 0          | 0      |
| ...  | ...     | ...           | ...           | ...        | ...    |
| 1173 | 1561    | 0.861832      | 0.138168      | 0          | 1      |
| 1174 | 624     | 0.928847      | 0.071153      | 0          | 0      |
| 1175 | 486     | 0.086124      | 0.913876      | 1          | 1      |
| 1176 | 3880    | 0.729072      | 0.270928      | 0          | 0      |
| 1177 | 3185    | 0.937935      | 0.062065      | 0          | 1      |

1178 rows × 5 columns
valid_gdf[valid_gdf.prediction == valid_gdf.target].count()
case_id          857
probability_0    857
probability_1    857
prediction       857
target           857
dtype: int64
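857 of 1,178 predictions match their targets, i.e. a validation accuracy of roughly 0.727:
# validation accuracy from the counts above: 857 / 1178 ≈ 0.727
(valid_gdf.prediction == valid_gdf.target).mean()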
valid_gdf['target'].min(), valid_gdf['prediction'].min(), valid_gdf['target'].max(), valid_gdf['prediction'].max()
(0, 0, 1, 1)
skplt.metrics.plot_precision_recall(valid_gdf['target'].to_numpy(),
valid_gdf[['probability_0', 'probability_1']].to_numpy(),
cmap='nipy_spectral')
plt.show()
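The plot shows per-class curves; for a single validation PR-AUC number, sklearn's average_precision_score on the positive-class probability can be used (a minimal sketch):
from sklearn.metrics import average_precision_score
# PR-AUC (average precision) for the positive class on the validation set
average_precision_score(valid_gdf['target'], valid_gdf['probability_1'])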
skplt.metrics.plot_roc(valid_gdf['target'].to_numpy(),
valid_gdf[['probability_0', 'probability_1']].to_numpy(),
cmap='nipy_spectral')
plt.show()
f1_score(valid_gdf['target'], valid_gdf['prediction'], average='macro')
0.7274804819673053
f1_score(valid_gdf['target'], valid_gdf['prediction'], average='weighted')
0.7274848024245539
test_gdf = pd.concat(map(pd.read_csv, glob.glob(f'{TEST_EVAL_OUTPUT_DIR}/*.csv')))
test_gdf['target'] = test_gdf['target'].astype('int64')
test_gdf['case_id'] = test_gdf['case_id'].astype('int64')
test_gdf = test_gdf.drop_duplicates()
test_gdf
|      | case_id | probability_0 | probability_1 | prediction | target |
|------|---------|---------------|---------------|------------|--------|
| 0    | 2793    | 0.132533      | 0.867467      | 1          | 1      |
| 1    | 2157    | 0.521037      | 0.478963      | 0          | 0      |
| 2    | 965     | 0.377667      | 0.622333      | 1          | 1      |
| 3    | 1632    | 0.061379      | 0.938621      | 1          | 1      |
| 4    | 540     | 0.868973      | 0.131027      | 0          | 0      |
| ...  | ...     | ...           | ...           | ...        | ...    |
| 3519 | 1926    | 0.354018      | 0.645982      | 1          | 0      |
| 3520 | 1862    | 0.248666      | 0.751334      | 1          | 0      |
| 3521 | 2977    | 0.247570      | 0.752430      | 1          | 1      |
| 3522 | 1747    | 0.909756      | 0.090244      | 0          | 0      |
| 3523 | 3039    | 0.163188      | 0.836812      | 1          | 1      |

3524 rows × 5 columns
test_gdf[test_gdf.prediction == test_gdf.target].count()
case_id          2364
probability_0    2364
probability_1    2364
prediction       2364
target           2364
dtype: int64
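2,364 of 3,524 predictions match their targets, i.e. a test accuracy of about 0.671:
# test accuracy from the counts above: 2364 / 3524 ≈ 0.671
(test_gdf.prediction == test_gdf.target).mean()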
test_gdf['target'].min(), test_gdf['prediction'].min(), test_gdf['target'].max(), test_gdf['prediction'].max()
(0, 0, 1, 1)
skplt.metrics.plot_precision_recall(test_gdf['target'].to_numpy(),
test_gdf[['probability_0', 'probability_1']].to_numpy(),
cmap='nipy_spectral')
plt.show()
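As for the validation set, a single PR-AUC number can be computed with average_precision_score (sketch):
# PR-AUC (average precision) for the positive class on the test set
average_precision_score(test_gdf['target'], test_gdf['probability_1'])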
skplt.metrics.plot_roc(test_gdf['target'].to_numpy(),
test_gdf[['probability_0', 'probability_1']].to_numpy(),
cmap='nipy_spectral')
plt.show()
f1_score(test_gdf['target'], test_gdf['prediction'], average='macro')
0.67062321043424
f1_score(test_gdf['target'], test_gdf['prediction'], average='weighted')
0.6706232104342401
We shut down the kernel!!!
from nbdev import nbdev_export
nbdev_export()