In [1]:
#| default_exp feature_preprocessing.pen_digits.tabular_to_timeseries
%load_ext autoreload
%autoreload 2
In [2]:
# declare a list tasks whose products you want to use as inputs
upstream = ['feature_preprocessing_pen_digits']
In [3]:
# Parameters
upstream = {"feature_preprocessing_pen_digits": {"nb": "/home/ubuntu/vitmtsc_nbdev/output/203_feature_preprocessing.pen_digits.target_encoding.html", "PenDigits_TRAIN_TE": "/home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding/train", "PenDigits_VALID_TE": "/home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding/valid", "PenDigits_TEST_TE": "/home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding/test", "PenDigits_workflow_dir": "/home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding/nvtabular_workflow"}}
product = {"nb": "/home/ubuntu/vitmtsc_nbdev/output/303_feature_preprocessing.pen_digits.tabular_to_timeseries.html", "PenDigits_TRAIN_MODEL_INPUT": "/home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding-nn/train", "PenDigits_VALID_MODEL_INPUT": "/home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding-nn/valid", "PenDigits_TEST_MODEL_INPUT": "/home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding-nn/test"}
In [4]:
#| hide
from nbdev.showdoc import *
In [5]:
#| export
from vitmtsc import *
from vitmtsc.core import *
from vitmtsc.data.pen_digits import *
from vitmtsc.feature_preprocessing.pen_digits.target_encoding import *
import os
import glob
In [6]:
#| export
upstream = {
    "feature_preprocessing_pen_digits": {
        "nb": "/home/ubuntu/vitmtsc_nbdev/output/203_feature_preprocessing.pen_digits.target_encoding.html",
        "PenDigits_TRAIN_TE": "/home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding/train",
        "PenDigits_VALID_TE": "/home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding/valid",
        "PenDigits_TEST_TE": "/home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding/test",
        "PenDigits_workflow_dir": "/home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding/nvtabular_workflow",
    }
}
product = {
    "nb": "/home/ubuntu/vitmtsc_nbdev/output/303_feature_preprocessing.pen_digits.tabular_to_timeseries.html",
    "PenDigits_TRAIN_MODEL_INPUT": "/home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding-nn/train",
    "PenDigits_VALID_MODEL_INPUT": "/home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding-nn/valid",
    "PenDigits_TEST_MODEL_INPUT": "/home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding-nn/test",
}

Feature Preprocessing for Neural Networks - III¶

Convert Category Encoding data from tabular to time-series format

In [7]:
from dask.distributed import Client
from dask_cuda import LocalCUDACluster

cluster = LocalCUDACluster(memory_limit='auto', device_memory_limit=0.5, rmm_pool_size='20GB', rmm_managed_memory=True)
client = Client(cluster)
client
2022-09-23 19:02:17,166 - distributed.preloading - INFO - Creating preload: dask_cuda.initialize
2022-09-23 19:02:17,166 - distributed.preloading - INFO - Import preload module: dask_cuda.initialize
2022-09-23 19:02:17,216 - distributed.preloading - INFO - Creating preload: dask_cuda.initialize
2022-09-23 19:02:17,216 - distributed.preloading - INFO - Import preload module: dask_cuda.initialize
2022-09-23 19:02:17,230 - distributed.preloading - INFO - Creating preload: dask_cuda.initialize
2022-09-23 19:02:17,230 - distributed.preloading - INFO - Import preload module: dask_cuda.initialize
2022-09-23 19:02:17,244 - distributed.preloading - INFO - Creating preload: dask_cuda.initialize
2022-09-23 19:02:17,244 - distributed.preloading - INFO - Import preload module: dask_cuda.initialize
Out[7]:

Client

Client-3e7f3432-3b72-11ed-810b-02b68d644837

Connection method: Cluster object Cluster type: dask_cuda.LocalCUDACluster
Dashboard: http://127.0.0.1:8787/status

Cluster Info

LocalCUDACluster

96dcea03

Dashboard: http://127.0.0.1:8787/status Workers: 4
Total threads: 4 Total memory: 150.00 GiB
Status: running Using processes: True

Scheduler Info

Scheduler

Scheduler-3ba0e312-3744-4205-8094-7ea1bcb024f9

Comm: tcp://127.0.0.1:46179 Workers: 4
Dashboard: http://127.0.0.1:8787/status Total threads: 4
Started: Just now Total memory: 150.00 GiB

Workers

Worker: 0

Comm: tcp://127.0.0.1:38033 Total threads: 1
Dashboard: http://127.0.0.1:40941/status Memory: 37.50 GiB
Nanny: tcp://127.0.0.1:35823
Local directory: /tmp/dask-worker-space/worker-jeabj3t4
GPU: Tesla T4 GPU memory: 14.76 GiB

Worker: 1

Comm: tcp://127.0.0.1:42223 Total threads: 1
Dashboard: http://127.0.0.1:35907/status Memory: 37.50 GiB
Nanny: tcp://127.0.0.1:39823
Local directory: /tmp/dask-worker-space/worker-wc5xorgt
GPU: Tesla T4 GPU memory: 14.76 GiB

Worker: 2

Comm: tcp://127.0.0.1:34225 Total threads: 1
Dashboard: http://127.0.0.1:46691/status Memory: 37.50 GiB
Nanny: tcp://127.0.0.1:37621
Local directory: /tmp/dask-worker-space/worker-2f8g2n5b
GPU: Tesla T4 GPU memory: 14.76 GiB

Worker: 3

Comm: tcp://127.0.0.1:38599 Total threads: 1
Dashboard: http://127.0.0.1:40853/status Memory: 37.50 GiB
Nanny: tcp://127.0.0.1:44981
Local directory: /tmp/dask-worker-space/worker-timui3r3
GPU: Tesla T4 GPU memory: 14.76 GiB
In [8]:
#| export
DATASET_NAME = 'PenDigits'
SEQUENCE_LENGTH = 8
NUMBER_OF_FEATURES = 2
NUM_TARGET = 10

Convert from Tabular to Time-Series Format

In [9]:
#| export
MTSC_COLUMN_NAMES = [
'dim_0',
'dim_1']
In [10]:
#| export
ALL_COLUMNS = ['case_id', 'case_id_seq', 'reading_id'] + MTSC_COLUMN_NAMES + ['class_vals']

Input Data Location

In [11]:
target_encoded_train_dir = os.path.join("./", upstream['feature_preprocessing_pen_digits']['PenDigits_TRAIN_TE'])
target_encoded_valid_dir = os.path.join("./", upstream['feature_preprocessing_pen_digits']['PenDigits_TRAIN_TE'])
target_encoded_test_dir = os.path.join("./", upstream['feature_preprocessing_pen_digits']['PenDigits_TEST_TE'])

Output Data Location

In [12]:
output_train_dir = os.path.join("./", product['PenDigits_TRAIN_MODEL_INPUT'])
output_valid_dir = os.path.join("./", product['PenDigits_VALID_MODEL_INPUT'])
output_test_dir = os.path.join("./", product['PenDigits_TEST_MODEL_INPUT'])
In [13]:
!mkdir -p $output_train_dir
!mkdir -p $output_valid_dir
!mkdir -p $output_test_dir

Train Dataset Conversion
¶

Tabular to Time-Series format conversion

In [14]:
%%time
convert_from_tabular_to_timeseries_format(input_dir = target_encoded_train_dir, 
                                          output_dir = output_train_dir, 
                                          all_columns = ALL_COLUMNS,
                                          mtsc_column_names = MTSC_COLUMN_NAMES,
                                          chunk_size_processing = 50000,
                                          number_of_features = NUMBER_OF_FEATURES, 
                                          seq_len = SEQUENCE_LENGTH,
                                          chunk_size_file = 10000)
case_id_seq_min:  1 case_id_seq_max:  7493
Total number of chunks to be processed:  1
Started processing chunk:  0  with case_id_seq from :  0 to  7493
Before CumCount Min:  0 CumCount Max:  7
After CumCount Min:  0 CumCount Max:  7
sorted
flattened_gdf.shape:  (5995, 18)
Total number of files to be created:  1
Writing to output file:  /home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding-nn/train/chunk_0_part_0.parquet with records from iloc:  0 to  5995
Finished processing chunk:  0  with case_id_seq from :  0 to  7493
CPU times: user 2.5 s, sys: 581 ms, total: 3.08 s
Wall time: 4.8 s

Valid Dataset Conversion
¶

Tabular to Time-Series format conversion

In [15]:
%%time
convert_from_tabular_to_timeseries_format(input_dir = target_encoded_valid_dir, 
                                          output_dir = output_valid_dir, 
                                          all_columns = ALL_COLUMNS,
                                          mtsc_column_names = MTSC_COLUMN_NAMES,
                                          chunk_size_processing = 50000,
                                          number_of_features = NUMBER_OF_FEATURES, 
                                          seq_len = SEQUENCE_LENGTH,
                                          chunk_size_file = 10000)
case_id_seq_min:  1 case_id_seq_max:  7493
Total number of chunks to be processed:  1
Started processing chunk:  0  with case_id_seq from :  0 to  7493
Before CumCount Min:  0 CumCount Max:  7
After CumCount Min:  0 CumCount Max:  7
sorted
flattened_gdf.shape:  (5995, 18)
Total number of files to be created:  1
Writing to output file:  /home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding-nn/valid/chunk_0_part_0.parquet with records from iloc:  0 to  5995
Finished processing chunk:  0  with case_id_seq from :  0 to  7493
CPU times: user 117 ms, sys: 20.3 ms, total: 137 ms
Wall time: 179 ms

Test Dataset Conversion
¶

Tabular to Time-Series format conversion

In [16]:
%%time
convert_from_tabular_to_timeseries_format(input_dir = target_encoded_test_dir, 
                                          output_dir = output_test_dir, 
                                          all_columns = ALL_COLUMNS,
                                          mtsc_column_names = MTSC_COLUMN_NAMES,
                                          chunk_size_processing = 50000,
                                          number_of_features = NUMBER_OF_FEATURES, 
                                          seq_len = SEQUENCE_LENGTH,
                                          chunk_size_file = 10000)
case_id_seq_min:  0 case_id_seq_max:  3497
Total number of chunks to be processed:  1
Started processing chunk:  0  with case_id_seq from :  0 to  3497
Before CumCount Min:  0 CumCount Max:  7
After CumCount Min:  0 CumCount Max:  7
sorted
flattened_gdf.shape:  (3498, 18)
Total number of files to be created:  1
Writing to output file:  /home/ubuntu/vitmtsc_nbdev/output/PenDigits/target_encoding-nn/test/chunk_0_part_0.parquet with records from iloc:  0 to  3498
Finished processing chunk:  0  with case_id_seq from :  0 to  3497
CPU times: user 121 ms, sys: 8.57 ms, total: 130 ms
Wall time: 172 ms

Verify Datasets
¶

In [17]:
%%time
import dask_cudf
train_gdf = dask_cudf.read_parquet(output_train_dir)
train_gdf.head()
CPU times: user 30.5 ms, sys: 2.36 ms, total: 32.9 ms
Wall time: 69.4 ms
Out[17]:
dim_0_0 dim_0_1 dim_0_2 dim_0_3 dim_0_4 dim_0_5 dim_0_6 dim_0_7 dim_1_0 dim_1_1 dim_1_2 dim_1_3 dim_1_4 dim_1_5 dim_1_6 dim_1_7 class_vals case_id
0 -0.856053 1.240864 1.477136 0.620649 -0.117702 -0.856053 -1.476268 -1.358132 0.954561 1.380989 1.352560 0.783990 0.215419 -0.353152 -0.921722 -1.461864 1.0 5596.0
1 -1.476268 -0.176770 1.477136 1.299932 0.532047 0.177638 0.059502 0.118570 1.295703 1.352560 1.380989 1.324132 0.670276 -0.040438 -0.751151 -1.461864 7.0 4695.0
2 0.029968 0.886455 1.477136 -0.708383 0.709251 1.270398 0.975057 -1.476268 1.210418 0.215419 1.380989 1.039846 0.442847 0.329133 -1.206007 -1.461864 9.0 2777.0
3 1.063660 -0.531179 -1.446734 -1.476268 0.295775 1.477136 -0.560713 -1.269530 1.380989 1.153561 0.101705 -1.007008 -1.461864 -0.580580 -0.495294 -1.433436 6.0 6982.0
4 -1.476268 -0.147236 0.709251 0.207172 -0.442576 -0.737917 0.089036 1.477136 1.181989 1.380989 0.670276 -0.410009 -1.461864 -0.779580 -0.296295 -0.410009 7.0 242.0
In [18]:
train_gdf.tail()
Out[18]:
dim_0_0 dim_0_1 dim_0_2 dim_0_3 dim_0_4 dim_0_5 dim_0_6 dim_0_7 dim_1_0 dim_1_1 dim_1_2 dim_1_3 dim_1_4 dim_1_5 dim_1_6 dim_1_7 class_vals case_id
5990 -1.121859 -1.476268 0.207172 1.477136 1.299932 0.738785 0.207172 -0.294906 1.181989 1.295703 1.380989 1.039846 0.414419 -0.239437 -0.836437 -1.461864 7.0 4219.0
5991 1.477136 0.354843 -1.476268 -1.092325 0.768319 1.299932 0.650183 -0.117702 1.181989 0.727133 0.272276 1.039846 1.380989 0.442847 -0.523723 -1.461864 9.0 1776.0
5992 0.738785 -0.590247 -1.476268 0.354843 1.477136 0.591115 0.148104 -0.353974 1.380989 0.613418 -0.211009 -0.211009 -0.012009 0.755561 -0.381580 -1.461864 4.0 1015.0
5993 -1.476268 -1.180927 0.177638 1.477136 0.561581 -0.294906 -0.974189 -1.121859 1.380989 1.096703 1.125132 1.153561 0.556561 -0.097295 -0.864865 -1.461864 7.0 7094.0
5994 -1.476268 -0.590247 -0.058634 0.768319 0.856921 0.768319 0.768319 1.477136 -0.381580 0.300705 1.011418 1.380989 0.641847 -0.068866 -0.779580 -1.461864 1.0 3940.0
In [19]:
%%time
train_gdf['case_id'].nunique().compute(), train_gdf['class_vals'].nunique().compute()
CPU times: user 236 ms, sys: 2.43 ms, total: 238 ms
Wall time: 273 ms
Out[19]:
(5995, 10)
In [20]:
%%time
import dask_cudf
valid_gdf = dask_cudf.read_parquet(output_valid_dir)
valid_gdf.head()
CPU times: user 22.5 ms, sys: 2.28 ms, total: 24.7 ms
Wall time: 38 ms
Out[20]:
dim_0_0 dim_0_1 dim_0_2 dim_0_3 dim_0_4 dim_0_5 dim_0_6 dim_0_7 dim_1_0 dim_1_1 dim_1_2 dim_1_3 dim_1_4 dim_1_5 dim_1_6 dim_1_7 class_vals case_id
0 0.000434 -1.121859 0.148104 -0.974189 -1.476268 -1.446734 -0.442576 1.477136 1.324132 0.272276 -0.410009 -1.461864 -0.751151 0.613418 1.380989 1.324132 5.0 6196.0
1 -1.269530 0.118570 0.561581 -0.294906 1.211330 1.477136 0.059502 -1.476268 1.068275 1.380989 0.698704 0.016419 -0.182580 -0.921722 -1.291293 -1.461864 3.0 3507.0
2 -0.501645 0.975057 1.477136 -0.088168 -1.476268 -0.826519 0.089036 -1.446734 0.783990 1.352560 1.380989 0.926132 0.300705 -0.381580 -1.063865 -1.461864 5.0 6895.0
3 1.477136 -0.058634 -0.974189 -1.269530 -0.235838 1.359000 0.295775 -1.476268 1.380989 0.869275 0.101705 -0.779580 -1.461864 -1.120722 -0.523723 -0.665865 6.0 6740.0
4 0.738785 -0.442576 -1.417200 -1.476268 -0.176770 1.477136 0.532047 -1.003723 1.380989 0.869275 0.044848 -0.864865 -1.461864 -1.234436 -0.580580 -0.864865 6.0 1977.0
In [21]:
valid_gdf.tail()
Out[21]:
dim_0_0 dim_0_1 dim_0_2 dim_0_3 dim_0_4 dim_0_5 dim_0_6 dim_0_7 dim_1_0 dim_1_1 dim_1_2 dim_1_3 dim_1_4 dim_1_5 dim_1_6 dim_1_7 class_vals case_id
5990 -0.353974 -1.476268 -0.885587 1.447602 1.477136 0.738785 0.236706 -0.206304 1.380989 0.641847 -0.097295 -0.182580 0.385990 0.329133 -0.580580 -1.461864 4.0 3784.0
5991 0.118570 0.827387 0.591115 -0.324440 -1.476268 -1.003723 0.236706 1.477136 0.499704 1.380989 0.101705 -0.950151 -1.461864 -0.808008 -0.950151 -1.177579 2.0 2039.0
5992 1.093194 0.029968 -1.476268 0.709251 1.477136 1.152262 0.827387 0.295775 1.380989 0.840847 -0.182580 -0.154152 0.414419 1.096703 -0.211009 -1.461864 4.0 893.0
5993 -1.417200 -1.476268 -0.206304 1.063660 1.477136 0.797853 0.177638 0.000434 -0.211009 0.186990 0.783990 1.380989 1.181989 0.329133 -0.552151 -1.461864 1.0 1220.0
5994 1.211330 0.177638 -0.915121 -1.476268 -0.826519 1.477136 1.447602 -0.915121 1.380989 1.125132 0.300705 -0.609008 -1.461864 -1.319722 -0.523723 -0.211009 6.0 7406.0
In [22]:
%%time
valid_gdf['case_id'].nunique().compute(), valid_gdf['class_vals'].nunique().compute()
CPU times: user 35.5 ms, sys: 170 µs, total: 35.7 ms
Wall time: 78.5 ms
Out[22]:
(5995, 10)
In [23]:
%%time
import dask_cudf
test_gdf = dask_cudf.read_parquet(output_test_dir)
test_gdf.head()
CPU times: user 21.6 ms, sys: 1.42 ms, total: 23 ms
Wall time: 32.8 ms
Out[23]:
dim_0_0 dim_0_1 dim_0_2 dim_0_3 dim_0_4 dim_0_5 dim_0_6 dim_0_7 dim_1_0 dim_1_1 dim_1_2 dim_1_3 dim_1_4 dim_1_5 dim_1_6 dim_1_7 class_vals case_id
0 -0.737917 0.532047 0.325309 -1.358132 -0.235838 1.477136 0.532047 -1.476268 0.471276 -0.239437 -1.461864 -1.206007 -0.097295 0.869275 1.380989 0.641847 8.0 819.0
1 0.797853 -0.058634 0.532047 -0.353974 -1.476268 0.000434 1.477136 0.413911 0.926132 0.584990 -0.523723 -1.461864 -0.978579 -0.097295 0.783990 1.380989 8.0 2500.0
2 -0.531179 0.325309 1.477136 0.591115 1.122728 1.122728 -0.206304 -1.476268 0.897704 1.380989 1.181989 0.442847 -0.466866 -1.291293 -1.461864 -1.376579 3.0 2683.0
3 -0.915121 -0.649315 -0.147236 -0.442576 -1.092325 -1.476268 -0.029100 1.477136 0.528133 1.380989 1.181989 -0.068866 -1.262865 -1.461864 -1.262865 -1.092293 1.0 3266.0
4 -0.147236 0.738785 1.477136 0.856921 0.856921 1.181796 -0.029100 -1.476268 0.926132 1.380989 0.783990 0.073276 -0.324723 -0.978579 -1.461864 -1.234436 3.0 2445.0
In [24]:
test_gdf.tail()
Out[24]:
dim_0_0 dim_0_1 dim_0_2 dim_0_3 dim_0_4 dim_0_5 dim_0_6 dim_0_7 dim_1_0 dim_1_1 dim_1_2 dim_1_3 dim_1_4 dim_1_5 dim_1_6 dim_1_7 class_vals case_id
3493 -1.476268 -0.885587 0.207172 -0.147236 -1.062791 -1.239995 0.089036 1.477136 0.329133 1.380989 1.039846 -0.125723 -1.063865 -1.461864 -1.234436 -1.376579 2.0 3181.0
3494 -0.442576 -1.476268 -1.299064 0.207172 1.329466 1.477136 0.266240 -0.590247 0.812418 -0.154152 -1.405007 -1.461864 -0.523723 0.783990 1.380989 0.414419 0.0 2392.0
3495 1.477136 0.886455 -0.235838 -1.121859 -1.476268 -0.029100 0.148104 -1.476268 1.267275 1.380989 0.556561 -0.324723 -1.348150 -1.461864 -0.609008 -0.921722 6.0 1736.0
3496 1.181796 0.148104 -0.974189 -1.476268 -0.442576 1.477136 1.093194 -0.826519 1.380989 0.954561 0.186990 -0.722722 -1.461864 -1.319722 -0.523723 -0.438437 6.0 1641.0
3497 -0.619781 0.059502 0.000434 0.059502 0.059502 -1.476268 -0.147236 1.477136 0.300705 1.380989 1.238846 0.073276 -1.120722 -1.461864 -1.348150 -1.319722 1.0 2782.0
In [25]:
%%time
test_gdf['case_id'].nunique().compute(), test_gdf['class_vals'].nunique().compute()
CPU times: user 35.8 ms, sys: 0 ns, total: 35.8 ms
Wall time: 78 ms
Out[25]:
(3498, 10)

We reset the kernel!!!

In [26]:
%%time
client.shutdown()
client.close()
Traceback (most recent call last):
  File "/home/ubuntu/anaconda3/envs/rapids-22.08_ploomber/lib/python3.8/site-packages/distributed/utils.py", line 778, in wrapper
    return await func(*args, **kwargs)
  File "/home/ubuntu/anaconda3/envs/rapids-22.08_ploomber/lib/python3.8/site-packages/distributed/client.py", line 1211, in _reconnect
    await self._ensure_connected(timeout=timeout)
  File "/home/ubuntu/anaconda3/envs/rapids-22.08_ploomber/lib/python3.8/site-packages/distributed/client.py", line 1241, in _ensure_connected
    comm = await connect(
  File "/home/ubuntu/anaconda3/envs/rapids-22.08_ploomber/lib/python3.8/site-packages/distributed/comm/core.py", line 315, in connect
    await asyncio.sleep(backoff)
  File "/home/ubuntu/anaconda3/envs/rapids-22.08_ploomber/lib/python3.8/asyncio/tasks.py", line 659, in sleep
    return await future
asyncio.exceptions.CancelledError

Traceback (most recent call last):
  File "/home/ubuntu/anaconda3/envs/rapids-22.08_ploomber/lib/python3.8/site-packages/distributed/utils.py", line 778, in wrapper
    return await func(*args, **kwargs)
  File "/home/ubuntu/anaconda3/envs/rapids-22.08_ploomber/lib/python3.8/site-packages/distributed/client.py", line 1400, in _handle_report
    await self._reconnect()
  File "/home/ubuntu/anaconda3/envs/rapids-22.08_ploomber/lib/python3.8/site-packages/distributed/utils.py", line 778, in wrapper
    return await func(*args, **kwargs)
  File "/home/ubuntu/anaconda3/envs/rapids-22.08_ploomber/lib/python3.8/site-packages/distributed/client.py", line 1211, in _reconnect
    await self._ensure_connected(timeout=timeout)
  File "/home/ubuntu/anaconda3/envs/rapids-22.08_ploomber/lib/python3.8/site-packages/distributed/client.py", line 1241, in _ensure_connected
    comm = await connect(
  File "/home/ubuntu/anaconda3/envs/rapids-22.08_ploomber/lib/python3.8/site-packages/distributed/comm/core.py", line 315, in connect
    await asyncio.sleep(backoff)
  File "/home/ubuntu/anaconda3/envs/rapids-22.08_ploomber/lib/python3.8/asyncio/tasks.py", line 659, in sleep
    return await future
asyncio.exceptions.CancelledError
CPU times: user 35 ms, sys: 6.82 ms, total: 41.8 ms
Wall time: 608 ms
In [27]:
from nbdev import nbdev_export
nbdev_export()