Mid- and high-level API to get `timeseries` data into a `DataLoaders`

test_eq_tensor[source]

test_eq_tensor(a, b)

assert tensor equality
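
For example, equal tensors should pass silently while unequal ones trip the assertion (illustrative values, assuming torch is in scope via the fastai imports):

# illustrative check; unequal tensors would raise an AssertionError
test_eq_tensor(torch.tensor([1., 2.]), torch.tensor([1., 2.]))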

class TensorTS[source]

TensorTS(x, **kwargs) :: TensorBase

Transform a 2D numpy.ndarray into a Tensor
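
A quick illustration with a random array (hypothetical data, assuming numpy is imported as np):

x = np.random.randn(24, 51).astype(np.float32)  # 24 channels, sequence length 51
ts = TensorTS(x)
ts.shape  # torch.Size([24, 51])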

class ToTensorTS[source]

ToTensorTS(enc=None, dec=None, split_idx=None, order=None) :: ItemTransform

A transform that always takes tuples as items

TSBlock[source]

TSBlock()

TransformBlock for timeseries: transforms a numpy array into the TensorTS type

get_min_max[source]

get_min_max(train, scale_subtype='all_samples')

get_min_max is only needed when we want to normalize timeseries tensors using ALL SAMPLES statistics

get_mean_std[source]

get_mean_std(train, scale_subtype='all_samples')

get_mean_std is only needed when we want to normalize timeseries tensors using ALL SAMPLES statistics
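
A minimal sketch of how these statistics are meant to be used, mirroring the normalization tests at the end of this page (data.x refers to the TSData object created in the example below):

# compute the statistics once over the whole training array ...
min, max = get_min_max(data.x, scale_subtype='all_samples')
mean, std = get_mean_std(data.x, scale_subtype='all_samples')
# ... then hand them to the corresponding batch transform
batch_tfms = [Normalize(min=min, max=max, scale_subtype='all_samples', scale_range=(0, 1))]
# or: batch_tfms = [Standardize(mean=mean, std=std, scale_subtype='all_samples')]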

class Standardize[source]

Standardize(mean=None, std=None, scale_subtype='per_sample_per_channel', cuda=True) :: Transform

In timeseries lingo, Standardize means normalizing the timeseries (counter-intuitive): subtracting the mean and dividing by the std
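
Conceptually, for the default per_sample_per_channel subtype each channel of a sample is shifted by its own mean and scaled by its own std; a plain-PyTorch sketch of that computation (the eps guard is an assumption, not necessarily what the transform uses):

x = torch.randn(24, 51)              # one sample: (channels, sequence length)
mean = x.mean(dim=-1, keepdim=True)  # one mean per channel
std = x.std(dim=-1, keepdim=True)    # one std per channel
x_std = (x - mean) / (std + 1e-8)    # roughly what Standardize computes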

class Normalize[source]

Normalize(min=None, max=None, scale_subtype='per_sample_per_channel', scale_range=(0, 1), cuda=True) :: Transform

In timeseries lingo, Normalize means scaling the timeseries between its min and max values (counter-intuitive)
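
Similarly, per_sample_per_channel min-max scaling maps each channel of a sample into scale_range; a plain-PyTorch sketch (again with an assumed eps guard):

x = torch.randn(24, 51)                      # one sample: (channels, sequence length)
lo, hi = (0, 1)                              # scale_range
x_min = x.min(dim=-1, keepdim=True).values
x_max = x.max(dim=-1, keepdim=True).values
x_scaled = (x - x_min) / (x_max - x_min + 1e-8) * (hi - lo) + lo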

default_show_batch[source]

default_show_batch(x, y, samples, ctxs=None, max_n=9, **kwargs)

Example

# path_data = Config().data
# path_data, path_data.ls()
dsname = 'NATOPS'  # other options: 'LSST', 'Wine', 'Epilepsy', 'HandMovementDirection'
# url = 'http://www.timeseriesclassification.com/Downloads/NATOPS.zip'
path = unzip_data(URLs_TS.NATOPS)
path
Path('/home/farid/.fastai/data/NATOPS')
path.ls()
(#54) [Path('/home/farid/.fastai/data/NATOPS/NATOPS.jpg'),Path('/home/farid/.fastai/data/NATOPS/NATOPS.txt'),Path('/home/farid/.fastai/data/NATOPS/NATOPSDimension10_TEST.arff'),Path('/home/farid/.fastai/data/NATOPS/NATOPSDimension10_TRAIN.arff'),Path('/home/farid/.fastai/data/NATOPS/NATOPSDimension11_TEST.arff'),Path('/home/farid/.fastai/data/NATOPS/NATOPSDimension11_TRAIN.arff'),Path('/home/farid/.fastai/data/NATOPS/NATOPSDimension12_TEST.arff'),Path('/home/farid/.fastai/data/NATOPS/NATOPSDimension12_TRAIN.arff'),Path('/home/farid/.fastai/data/NATOPS/NATOPSDimension13_TEST.arff'),Path('/home/farid/.fastai/data/NATOPS/NATOPSDimension13_TRAIN.arff')...]
fname_train = f'{dsname}_TRAIN.arff'
fname_test = f'{dsname}_TEST.arff'
fnames = [path/fname_train, path/fname_test]
fnames
[Path('/home/farid/.fastai/data/NATOPS/NATOPS_TRAIN.arff'),
 Path('/home/farid/.fastai/data/NATOPS/NATOPS_TEST.arff')]
data = TSData.from_arff(fnames)
print(data)
TSData:
 Datasets names (concatenated): ['NATOPS_TRAIN', 'NATOPS_TEST']
 Filenames:                     [Path('/home/farid/.fastai/data/NATOPS/NATOPS_TRAIN.arff'), Path('/home/farid/.fastai/data/NATOPS/NATOPS_TEST.arff')]
 Data shape: (360, 24, 51)
 Targets shape: (360,)
 Nb Samples: 360
 Nb Channels:           24
 Sequence Length: 51
# returns tuples
items = data.get_items()
idx=0
items[idx], type(items[idx][0]), type(items[idx][1])
((array([[-0.372758, -0.367844, -0.378445, ..., -0.537007, -0.475939,
          -0.479505],
         [-1.821679, -1.841987, -1.821358, ..., -1.751323, -1.772353,
          -1.761632],
         [-0.846321, -0.846325, -0.839571, ..., -0.60374 , -0.763048,
          -0.793202],
         ...,
         [ 0.619205,  0.617045,  0.624789, ...,  0.541325,  0.559272,
           0.571569],
         [-1.771773, -1.79666 , -1.738568, ..., -1.691657, -1.683121,
          -1.721661],
         [-0.810086, -0.818863, -0.78806 , ..., -0.716364, -0.757526,
          -0.741532]], dtype=float32), '4.0'), numpy.ndarray, str)
seed = 42
splits = RandomSplitter(seed=seed)(range_of(items))
splits
# len(splits[0]), len(splits[1])
((#288) [304,281,114,329,115,130,338,294,94,310...],
 (#72) [222,27,96,253,274,35,160,172,302,146...])

Using Datasets

tfms = [[ItemGetter(0), ToTensorTS()], [ItemGetter(1), Categorize()]]

# Create a dataset
ds = Datasets(items, tfms, splits=splits)
ds.vocab
(#6) ['1.0','2.0','3.0','4.0','5.0','6.0']
ds[0]
(TensorTS([[-0.3728, -0.3678, -0.3784,  ..., -0.5370, -0.4759, -0.4795],
         [-1.8217, -1.8420, -1.8214,  ..., -1.7513, -1.7724, -1.7616],
         [-0.8463, -0.8463, -0.8396,  ..., -0.6037, -0.7630, -0.7932],
         ...,
         [ 0.6192,  0.6170,  0.6248,  ...,  0.5413,  0.5593,  0.5716],
         [-1.7718, -1.7967, -1.7386,  ..., -1.6917, -1.6831, -1.7217],
         [-0.8101, -0.8189, -0.7881,  ..., -0.7164, -0.7575, -0.7415]]),
 TensorCategory(3))
ax = show_at(ds, 2, figsize=(1,1))
3.0

Create DataLoaders objects using Datasets.dataloaders

bs = 128                            
# Normalize (at batch time) means scale using min and max
batch_tfms = [Normalize(scale_subtype = 'per_sample_per_channel', scale_range=(0, 1))]
# batch_tfms = [Standardize(scale_subtype = 'per_sample_per_channel')]

dls1 = ds.dataloaders( bs=bs, val_bs=bs * 2, after_batch=batch_tfms, num_workers=0, device=default_device()) 
dls1.show_batch(max_n=9, chs=range(0,12,3))

Using DataBlock

First method: supply the get_items function

getters = [ItemGetter(0), ItemGetter(1)]  
tsdb = DataBlock(blocks=(TSBlock, CategoryBlock),
                   get_items=get_ts_items,
                   getters=getters,
                   splitter=RandomSplitter(seed=seed),
                   batch_tfms = batch_tfms)
fnames
[Path('/home/farid/.fastai/data/NATOPS/NATOPS_TRAIN.arff'),
 Path('/home/farid/.fastai/data/NATOPS/NATOPS_TEST.arff')]
tsdb.datasets(fnames, verbose=True)
Collecting items from [Path('/home/farid/.fastai/data/NATOPS/NATOPS_TRAIN.arff'), Path('/home/farid/.fastai/data/NATOPS/NATOPS_TEST.arff')]
Found 360 items
2 datasets of sizes 288,72
Setting up Pipeline: itemgetter -> ToTensorTS
Setting up Pipeline: itemgetter -> Categorize
(#360) [(TensorTS([[-0.3728, -0.3678, -0.3784,  ..., -0.5370, -0.4759, -0.4795],
        [-1.8217, -1.8420, -1.8214,  ..., -1.7513, -1.7724, -1.7616],
        [-0.8463, -0.8463, -0.8396,  ..., -0.6037, -0.7630, -0.7932],
        ...,
        [ 0.6192,  0.6170,  0.6248,  ...,  0.5413,  0.5593,  0.5716],
        [-1.7718, -1.7967, -1.7386,  ..., -1.6917, -1.6831, -1.7217],
        [-0.8101, -0.8189, -0.7881,  ..., -0.7164, -0.7575, -0.7415]]), TensorCategory(3)),(TensorTS([[-0.5474, -0.5463, -0.5497,  ..., -0.5337, -0.5283, -0.5186],
        [-1.6001, -1.5994, -1.5957,  ..., -1.5761, -1.5722, -1.5660],
        [-0.8094, -0.8094, -0.8124,  ..., -0.7662, -0.7649, -0.7658],
        ...,
        [ 0.6189,  0.6487,  0.6189,  ...,  0.4554,  0.4570,  0.4567],
        [-1.4977, -1.4659, -1.5032,  ..., -1.4356, -1.4225, -1.4218],
        [-0.7549, -0.7068, -0.7589,  ..., -0.5383, -0.5302, -0.5294]]), TensorCategory(2)),(TensorTS([[-0.5871, -0.5873, -0.5864,  ..., -0.6062, -0.6023, -0.6009],
        [-1.7550, -1.7527, -1.7513,  ..., -1.8105, -1.8065, -1.8026],
        [-0.6488, -0.6524, -0.6524,  ..., -0.6920, -0.6908, -0.6962],
        ...,
        [ 0.4176,  0.4175,  0.4171,  ...,  0.4195,  0.4196,  0.4195],
        [-1.5492, -1.5488, -1.5471,  ..., -1.5960, -1.5926, -1.5912],
        [-0.5642, -0.5664, -0.5655,  ..., -0.6184, -0.6165, -0.6174]]), TensorCategory(2)),(TensorTS([[-0.5147, -0.5186, -0.5213,  ..., -0.6287, -0.6326, -0.6065],
        [-1.8940, -1.8950, -1.8920,  ..., -1.9529, -1.9495, -1.9313],
        [-0.7490, -0.7383, -0.7287,  ..., -0.6161, -0.6443, -0.6673],
        ...,
        [ 0.4397,  0.4380,  0.4334,  ...,  0.5668,  0.5064,  0.5925],
        [-1.7014, -1.7506, -1.6242,  ..., -1.6601, -1.9693, -1.6339],
        [-0.8094, -0.8248, -0.7892,  ..., -0.5281, -0.4773, -0.5100]]), TensorCategory(3)),(TensorTS([[-0.7186, -0.7211, -0.7180,  ..., -0.6922, -0.7028, -0.7016],
        [-2.1532, -2.1583, -2.1677,  ..., -2.2045, -2.2153, -2.2065],
        [-0.8591, -0.8674, -0.8649,  ..., -0.8984, -0.8807, -0.8891],
        ...,
        [ 0.6341,  0.6385,  0.6404,  ...,  0.6152,  0.6164,  0.6071],
        [-1.9561, -1.9656, -1.9828,  ..., -1.9257, -1.8981, -1.8610],
        [-0.7977, -0.7988, -0.8005,  ..., -0.8196, -0.8099, -0.8057]]), TensorCategory(2)),(TensorTS([[ 0.4128,  0.4004,  0.4079,  ..., -0.5414, -0.5114, -0.5209],
        [-1.4391, -1.4349, -1.4434,  ..., -2.0447, -1.9984, -2.0083],
        [-0.7040, -0.7072, -0.7067,  ..., -0.6536, -0.6744, -0.6730],
        ...,
        [ 0.3991,  0.3946,  0.3889,  ...,  0.4684,  0.4535,  0.4409],
        [-1.7136, -1.7655, -1.7794,  ..., -1.7297, -1.8097, -1.7897],
        [-0.8127, -0.8212, -0.8314,  ..., -0.7213, -0.7461, -0.7611]]), TensorCategory(4)),(TensorTS([[-0.5198, -0.5276, -0.5314,  ..., -0.4603, -0.4596, -0.4567],
        [-2.1401, -2.1804, -2.1843,  ..., -2.1347, -2.1343, -2.1386],
        [-0.9572, -0.9708, -0.9702,  ..., -1.0188, -1.0170, -1.0106],
        ...,
        [ 0.5677,  0.5893,  0.5807,  ...,  0.6343,  0.6394,  0.6392],
        [-2.0287, -2.0766, -2.2355,  ..., -2.1616, -2.1565, -2.1358],
        [-0.3590, -0.3517, -0.3986,  ..., -0.3543, -0.3374, -0.3263]]), TensorCategory(0)),(TensorTS([[ 0.7434,  0.7444,  0.7453,  ..., -0.4427, -0.4494, -0.4480],
        [-0.4510, -0.4529, -0.4499,  ..., -1.7690, -1.7563, -1.7709],
        [-0.2787, -0.2814, -0.2815,  ..., -0.6865, -0.6837, -0.6810],
        ...,
        [ 0.6425,  0.6446,  0.6451,  ...,  0.3893,  0.3890,  0.3898],
        [-1.7428, -1.6736, -1.6992,  ..., -1.6443, -1.5681, -1.6428],
        [-0.8336, -0.8284, -0.8325,  ..., -0.8406, -0.8047, -0.8339]]), TensorCategory(5)),(TensorTS([[-0.7108, -0.6836, -0.6844,  ..., -0.6204, -0.6346, -0.6370],
        [-2.3022, -2.1470, -2.1496,  ..., -2.1013, -2.1361, -2.1286],
        [-0.6939, -0.7542, -0.7478,  ..., -0.8316, -0.8098, -0.8026],
        ...,
        [ 0.6181,  0.6099,  0.6151,  ...,  0.8515,  0.7799,  0.8603],
        [-1.8771, -1.7774, -1.8164,  ..., -1.9511, -2.0111, -1.9185],
        [-0.8247, -0.7931, -0.8004,  ..., -0.7416, -0.6712, -0.7527]]), TensorCategory(2)),(TensorTS([[-0.5189, -0.5196, -0.5198,  ..., -0.4880, -0.4874, -0.4883],
        [-1.7153, -1.7255, -1.7133,  ..., -1.7203, -1.7194, -1.7335],
        [-0.4344, -0.4358, -0.4286,  ..., -0.4599, -0.4580, -0.4622],
        ...,
        [ 0.6355,  0.6176,  0.6272,  ...,  0.4416,  0.4274,  0.4263],
        [-1.4818, -1.5070, -1.4729,  ..., -1.4957, -1.4009, -1.4013],
        [-0.5648, -0.6060, -0.5844,  ..., -0.6681, -0.6432, -0.6468]]), TensorCategory(1))...]

Test DataBlock

test_eq(tsdb.type_tfms[0].map(type), [ToTensorTS])
test_eq(tsdb.type_tfms[1].map(type), [Categorize])
test_eq(tsdb.default_item_tfms.map(type), [ToTensor])
test_eq(tsdb.batch_tfms.map(type), [Normalize])

Summary

tsdb.summary(fnames)
Setting-up type transforms pipelines
Collecting items from [Path('/home/farid/.fastai/data/NATOPS/NATOPS_TRAIN.arff'), Path('/home/farid/.fastai/data/NATOPS/NATOPS_TEST.arff')]
Found 360 items
2 datasets of sizes 288,72
Setting up Pipeline: itemgetter -> ToTensorTS
Setting up Pipeline: itemgetter -> Categorize

Building one sample
  Pipeline: itemgetter -> ToTensorTS
    starting from
      ([[-0.540579 -0.54101  -0.540603 ... -0.56305  -0.566314 -0.553712]
 [-1.539567 -1.540042 -1.538992 ... -1.532014 -1.534645 -1.536015]
 [-0.608539 -0.604609 -0.607679 ... -0.593769 -0.592854 -0.599014]
 ...
 [ 0.454542  0.449924  0.453195 ...  0.480281  0.45537   0.457275]
 [-1.411445 -1.363464 -1.390869 ... -1.468123 -1.368706 -1.386574]
 [-0.473406 -0.453322 -0.463813 ... -0.440582 -0.427211 -0.435581]], 2.0)
    applying itemgetter gives
      [[-0.540579 -0.54101  -0.540603 ... -0.56305  -0.566314 -0.553712]
 [-1.539567 -1.540042 -1.538992 ... -1.532014 -1.534645 -1.536015]
 [-0.608539 -0.604609 -0.607679 ... -0.593769 -0.592854 -0.599014]
 ...
 [ 0.454542  0.449924  0.453195 ...  0.480281  0.45537   0.457275]
 [-1.411445 -1.363464 -1.390869 ... -1.468123 -1.368706 -1.386574]
 [-0.473406 -0.453322 -0.463813 ... -0.440582 -0.427211 -0.435581]]
    applying ToTensorTS gives
      TensorTS of size 24x51
  Pipeline: itemgetter -> Categorize
    starting from
      ([[-0.540579 -0.54101  -0.540603 ... -0.56305  -0.566314 -0.553712]
 [-1.539567 -1.540042 -1.538992 ... -1.532014 -1.534645 -1.536015]
 [-0.608539 -0.604609 -0.607679 ... -0.593769 -0.592854 -0.599014]
 ...
 [ 0.454542  0.449924  0.453195 ...  0.480281  0.45537   0.457275]
 [-1.411445 -1.363464 -1.390869 ... -1.468123 -1.368706 -1.386574]
 [-0.473406 -0.453322 -0.463813 ... -0.440582 -0.427211 -0.435581]], 2.0)
    applying itemgetter gives
      2.0
    applying Categorize gives
      TensorCategory(1)

Final sample: (TensorTS([[-0.5406, -0.5410, -0.5406,  ..., -0.5630, -0.5663, -0.5537],
        [-1.5396, -1.5400, -1.5390,  ..., -1.5320, -1.5346, -1.5360],
        [-0.6085, -0.6046, -0.6077,  ..., -0.5938, -0.5929, -0.5990],
        ...,
        [ 0.4545,  0.4499,  0.4532,  ...,  0.4803,  0.4554,  0.4573],
        [-1.4114, -1.3635, -1.3909,  ..., -1.4681, -1.3687, -1.3866],
        [-0.4734, -0.4533, -0.4638,  ..., -0.4406, -0.4272, -0.4356]]), TensorCategory(1))


Setting up after_item: Pipeline: ToTensor
Setting up before_batch: Pipeline: 
Setting up after_batch: Pipeline: Normalize

Building one batch
Applying item_tfms to the first sample:
  Pipeline: ToTensor
    starting from
      (TensorTS of size 24x51, TensorCategory(1))
    applying ToTensor gives
      (TensorTS of size 24x51, TensorCategory(1))

Adding the next 3 samples

No before_batch transform to apply

Collating items in a batch

Applying batch_tfms to the batch built
  Pipeline: Normalize
    starting from
      (TensorTS of size 4x24x51, TensorCategory([1, 5, 4, 5]))
    applying Normalize gives
      (TensorTS of size 4x24x51, TensorCategory([1, 5, 4, 5]))
dls2 = tsdb.dataloaders(fnames, num_workers=0, device=default_device())
dls2.show_batch(ctxs=None, max_n=9, chs=range(0,12,3))

Second method: provide the items object as an argument to the DataBlock.dataloaders() method

getters = [ItemGetter(0), ItemGetter(1)] 
tsdb = DataBlock(blocks=(TSBlock, CategoryBlock),
                   getters=getters,
                   splitter=RandomSplitter(seed=seed))
# Using data.get_items()
#dls3 = tsdb.dataloaders(data.get_items(), batch_tfms=batch_tfms, num_workers=0, device=default_device())

# Or using get_ts_items([fname_train, fname_test])
dls3 = tsdb.dataloaders(get_ts_items(fnames), batch_tfms=batch_tfms, num_workers=0, device=default_device())
dls3.show_batch(ctxs=None, max_n=9, chs=range(0,12,3))

class TSDataLoaders[source]

TSDataLoaders(*loaders, path='.', device=None) :: DataLoaders

Basic wrapper around several DataLoaders.

TSDataLoaders.from_files[source]

TSDataLoaders.from_files(fnames, path='.', valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, bs=64, val_bs=None, shuffle_train=True, device=None)

Create timeseries dataloaders from a list of timeseries files in fnames in path.

# fnames = [path/fname_train, path/fname_test]
dls4 = TSDataLoaders.from_files(fnames=fnames, path=path, batch_tfms=batch_tfms, num_workers=0, device=default_device())
dls4.path
Path('/home/farid/.fastai/data/NATOPS')
dls4.show_batch(ctxs=None, max_n=9, chs=range(0,12,3))

Train Model

get_n_channels[source]

get_n_channels(dl:DataLoader)

# Number of channels (i.e. dimensions in ARFF and TS files jargon)
c_in = get_n_channels(dls2.train) # data.n_channels
# Number of classes
c_out = dls1.c
c_in, c_out
(24, 6)

Create model

model = inception_time(c_in, c_out).to(device=default_device())
model
Sequential(
  (0): SequentialEx(
    (layers): ModuleList(
      (0): InceptionModule(
        (convs): ModuleList(
          (0): Conv1d(24, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
          (1): Conv1d(24, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
          (2): Conv1d(24, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
        )
        (maxpool_bottleneck): Sequential(
          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
          (1): Conv1d(24, 32, kernel_size=(1,), stride=(1,), bias=False)
        )
        (bn_relu): Sequential(
          (0): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (1): ReLU()
        )
      )
    )
  )
  (1): SequentialEx(
    (layers): ModuleList(
      (0): InceptionModule(
        (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,))
        (convs): ModuleList(
          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
        )
        (maxpool_bottleneck): Sequential(
          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
          (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
        )
        (bn_relu): Sequential(
          (0): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (1): ReLU()
        )
      )
    )
  )
  (2): SequentialEx(
    (layers): ModuleList(
      (0): InceptionModule(
        (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,))
        (convs): ModuleList(
          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
        )
        (maxpool_bottleneck): Sequential(
          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
          (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
        )
        (bn_relu): Sequential(
          (0): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (1): ReLU()
        )
      )
      (1): Shortcut(
        (act_fn): ReLU(inplace=True)
        (conv): Conv1d(128, 128, kernel_size=(1,), stride=(1,), bias=False)
        (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
  )
  (3): SequentialEx(
    (layers): ModuleList(
      (0): InceptionModule(
        (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,))
        (convs): ModuleList(
          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
        )
        (maxpool_bottleneck): Sequential(
          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
          (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
        )
        (bn_relu): Sequential(
          (0): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (1): ReLU()
        )
      )
    )
  )
  (4): SequentialEx(
    (layers): ModuleList(
      (0): InceptionModule(
        (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,))
        (convs): ModuleList(
          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
        )
        (maxpool_bottleneck): Sequential(
          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
          (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
        )
        (bn_relu): Sequential(
          (0): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (1): ReLU()
        )
      )
    )
  )
  (5): SequentialEx(
    (layers): ModuleList(
      (0): InceptionModule(
        (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,))
        (convs): ModuleList(
          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
        )
        (maxpool_bottleneck): Sequential(
          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
          (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
        )
        (bn_relu): Sequential(
          (0): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (1): ReLU()
        )
      )
      (1): Shortcut(
        (act_fn): ReLU(inplace=True)
        (conv): Conv1d(128, 128, kernel_size=(1,), stride=(1,), bias=False)
        (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
  )
  (6): AdaptiveConcatPool1d(
    (ap): AdaptiveAvgPool1d(output_size=1)
    (mp): AdaptiveMaxPool1d(output_size=1)
  )
  (7): Flatten(full=False)
  (8): Linear(in_features=256, out_features=6, bias=True)
)
# model[5]

Build learner

# opt_func = partial(Adam, lr=3e-3, wd=0.01)
# Or use Ranger
def opt_func(p, lr=slice(3e-3)): return Lookahead(RAdam(p, lr=lr, mom=0.95, wd=0.01))
# Learner
loss_func = LabelSmoothingCrossEntropy()
learn = Learner(dls2, model, opt_func=opt_func, loss_func=loss_func, metrics=accuracy)

Learner Summary

print(learn.summary())
Sequential (Input shape: ['64 x 24 x 51'])
================================================================
Layer (type)         Output Shape         Param #    Trainable 
================================================================
Conv1d               64 x 32 x 51         29,952     True      
________________________________________________________________
Conv1d               64 x 32 x 51         14,592     True      
________________________________________________________________
Conv1d               64 x 32 x 51         6,912      True      
________________________________________________________________
MaxPool1d            64 x 24 x 51         0          False     
________________________________________________________________
Conv1d               64 x 32 x 51         768        True      
________________________________________________________________
BatchNorm1d          64 x 128 x 51        256        True      
________________________________________________________________
ReLU                 64 x 128 x 51        0          False     
________________________________________________________________
Conv1d               64 x 32 x 51         4,128      True      
________________________________________________________________
Conv1d               64 x 32 x 51         39,936     True      
________________________________________________________________
Conv1d               64 x 32 x 51         19,456     True      
________________________________________________________________
Conv1d               64 x 32 x 51         9,216      True      
________________________________________________________________
MaxPool1d            64 x 128 x 51        0          False     
________________________________________________________________
Conv1d               64 x 32 x 51         4,096      True      
________________________________________________________________
BatchNorm1d          64 x 128 x 51        256        True      
________________________________________________________________
ReLU                 64 x 128 x 51        0          False     
________________________________________________________________
Conv1d               64 x 32 x 51         4,128      True      
________________________________________________________________
Conv1d               64 x 32 x 51         39,936     True      
________________________________________________________________
Conv1d               64 x 32 x 51         19,456     True      
________________________________________________________________
Conv1d               64 x 32 x 51         9,216      True      
________________________________________________________________
MaxPool1d            64 x 128 x 51        0          False     
________________________________________________________________
Conv1d               64 x 32 x 51         4,096      True      
________________________________________________________________
BatchNorm1d          64 x 128 x 51        256        True      
________________________________________________________________
ReLU                 64 x 128 x 51        0          False     
________________________________________________________________
ReLU                 64 x 128 x 51        0          False     
________________________________________________________________
Conv1d               64 x 128 x 51        16,384     True      
________________________________________________________________
BatchNorm1d          64 x 128 x 51        256        True      
________________________________________________________________
Conv1d               64 x 32 x 51         4,128      True      
________________________________________________________________
Conv1d               64 x 32 x 51         39,936     True      
________________________________________________________________
Conv1d               64 x 32 x 51         19,456     True      
________________________________________________________________
Conv1d               64 x 32 x 51         9,216      True      
________________________________________________________________
MaxPool1d            64 x 128 x 51        0          False     
________________________________________________________________
Conv1d               64 x 32 x 51         4,096      True      
________________________________________________________________
BatchNorm1d          64 x 128 x 51        256        True      
________________________________________________________________
ReLU                 64 x 128 x 51        0          False     
________________________________________________________________
Conv1d               64 x 32 x 51         4,128      True      
________________________________________________________________
Conv1d               64 x 32 x 51         39,936     True      
________________________________________________________________
Conv1d               64 x 32 x 51         19,456     True      
________________________________________________________________
Conv1d               64 x 32 x 51         9,216      True      
________________________________________________________________
MaxPool1d            64 x 128 x 51        0          False     
________________________________________________________________
Conv1d               64 x 32 x 51         4,096      True      
________________________________________________________________
BatchNorm1d          64 x 128 x 51        256        True      
________________________________________________________________
ReLU                 64 x 128 x 51        0          False     
________________________________________________________________
Conv1d               64 x 32 x 51         4,128      True      
________________________________________________________________
Conv1d               64 x 32 x 51         39,936     True      
________________________________________________________________
Conv1d               64 x 32 x 51         19,456     True      
________________________________________________________________
Conv1d               64 x 32 x 51         9,216      True      
________________________________________________________________
MaxPool1d            64 x 128 x 51        0          False     
________________________________________________________________
Conv1d               64 x 32 x 51         4,096      True      
________________________________________________________________
BatchNorm1d          64 x 128 x 51        256        True      
________________________________________________________________
ReLU                 64 x 128 x 51        0          False     
________________________________________________________________
ReLU                 64 x 128 x 51        0          False     
________________________________________________________________
Conv1d               64 x 128 x 51        16,384     True      
________________________________________________________________
BatchNorm1d          64 x 128 x 51        256        True      
________________________________________________________________
AdaptiveAvgPool1d    64 x 128 x 1         0          False     
________________________________________________________________
AdaptiveMaxPool1d    64 x 128 x 1         0          False     
________________________________________________________________
Flatten              64 x 256             0          False     
________________________________________________________________
Linear               64 x 6               1,542      True      
________________________________________________________________

Total params: 472,742
Total trainable params: 472,742
Total non-trainable params: 0

Optimizer used: <function opt_func at 0x7f80af810950>
Loss function: LabelSmoothingCrossEntropy()

Callbacks:
  - TrainEvalCallback
  - Recorder
  - ProgressCallback

LR finder

lr_min, lr_steep = learn.lr_find()
lr_min, lr_steep
(0.05248074531555176, 0.0012022644514217973)

Create ts_learner in the same style as fastai2.vision

Ranger[source]

Ranger(p, lr=slice(None, 0.003, None))

# class Learner():
#     def __init__(self, dls, model, loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=trainable_params, cbs=None,
#                  metrics=None, path=None, model_dir='models', wd=defaults.wd, wd_bn_bias=False, train_bn=True,
#                  moms=(0.95,0.85,0.95)):

ts_learner[source]

ts_learner(dls, model=None, opt_func='Ranger', loss_func=None, cbs=None, metrics=None, lr=0.001, splitter='trainable_params', path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95, 0.85, 0.95))

Build a ts learner with default settings if None is passed
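
For instance, the Learner assembled by hand above could presumably be replaced by something like the following; exactly which model and optimizer ts_learner builds when the defaults are kept is an assumption based on the signature above:

learn_ts = ts_learner(dls2, metrics=accuracy)  # hypothetical: model/opt_func/loss_func filled in by the defaults
learn_ts.fit_one_cycle(5, lr_max=1e-3)         # illustrative schedule; the run below uses the Learner built earlier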

Train

# learn.fit_one_cycle(20, lr_max=lr_steep)
# lr_max = 1e-3
epochs = 30; lr_max = lr_steep; pct_start = .7; moms = (0.95, 0.85, 0.95); wd = 1e-2
learn.fit_one_cycle(epochs, lr_max=lr_max, pct_start=pct_start,  moms=moms, wd=wd)
epoch train_loss valid_loss accuracy time
0 2.830583 1.796029 0.152778 00:01
1 2.778862 1.798929 0.152778 00:02
2 2.719929 1.802782 0.152778 00:02
3 2.656564 1.807106 0.152778 00:02
4 2.554070 1.811179 0.152778 00:02
5 2.405063 1.813437 0.166667 00:02
6 2.262482 1.793917 0.236111 00:01
7 2.104606 1.731987 0.319444 00:02
8 1.943539 1.606680 0.388889 00:01
9 1.800869 1.157220 0.777778 00:01
10 1.665563 0.961588 0.847222 00:02
11 1.542775 0.845267 0.833333 00:02
12 1.437215 0.710037 0.847222 00:02
13 1.341490 0.708617 0.847222 00:02
14 1.258099 0.689999 0.847222 00:02
15 1.185911 0.776040 0.833333 00:02
16 1.118779 0.672005 0.888889 00:02
17 1.059531 0.657039 0.861111 00:02
18 1.006146 0.681480 0.847222 00:02
19 0.955037 0.704021 0.875000 00:02
20 0.910534 0.618262 0.902778 00:02
21 0.873389 0.942535 0.861111 00:02
22 0.837613 0.574862 0.944444 00:02
23 0.805253 0.585018 0.944444 00:02
24 0.776481 0.771398 0.875000 00:01
25 0.747718 0.566906 0.958333 00:02
26 0.720828 0.561012 0.944444 00:02
27 0.696164 0.581944 0.916667 00:02
28 0.673795 0.550526 0.958333 00:02
29 0.653436 0.544037 0.944444 00:02
learn.recorder.plot_loss()

Show results

learn.show_results(max_n=9, chs=range(0,12,3))

Data Normalization

ALL SAMPLES statistics

scale_subtype = 'all_samples'
# scale_subtype = 'all_samples_per_channel'
mean, std = get_mean_std(data.x, scale_subtype=scale_subtype)
mean.shape, std.shape, mean, std
(torch.Size([24, 51]),
 torch.Size([24, 51]),
 tensor([[-0.3901, -0.3901, -0.3901,  ..., -0.3901, -0.3901, -0.3901],
         [-0.3901, -0.3901, -0.3901,  ..., -0.3901, -0.3901, -0.3901],
         [-0.3901, -0.3901, -0.3901,  ..., -0.3901, -0.3901, -0.3901],
         ...,
         [-0.3901, -0.3901, -0.3901,  ..., -0.3901, -0.3901, -0.3901],
         [-0.3901, -0.3901, -0.3901,  ..., -0.3901, -0.3901, -0.3901],
         [-0.3901, -0.3901, -0.3901,  ..., -0.3901, -0.3901, -0.3901]]),
 tensor([[0.8790, 0.8790, 0.8790,  ..., 0.8790, 0.8790, 0.8790],
         [0.8790, 0.8790, 0.8790,  ..., 0.8790, 0.8790, 0.8790],
         [0.8790, 0.8790, 0.8790,  ..., 0.8790, 0.8790, 0.8790],
         ...,
         [0.8790, 0.8790, 0.8790,  ..., 0.8790, 0.8790, 0.8790],
         [0.8790, 0.8790, 0.8790,  ..., 0.8790, 0.8790, 0.8790],
         [0.8790, 0.8790, 0.8790,  ..., 0.8790, 0.8790, 0.8790]]))

Prepare a TensorTS item to use in the normalization tests

idx = 3
item = items[idx]
item
(array([[-0.514671, -0.51864 , -0.521285, ..., -0.628697, -0.632625,
         -0.606548],
        [-1.893971, -1.895032, -1.891953, ..., -1.952879, -1.949485,
         -1.931336],
        [-0.748957, -0.738257, -0.728709, ..., -0.616065, -0.644297,
         -0.667284],
        ...,
        [ 0.439658,  0.437954,  0.433407, ...,  0.566757,  0.506425,
          0.592521],
        [-1.701396, -1.750605, -1.624187, ..., -1.660085, -1.96929 ,
         -1.633891],
        [-0.809376, -0.824759, -0.789241, ..., -0.528097, -0.47732 ,
         -0.51003 ]], dtype=float32), '4.0')
pipe = Pipeline([ItemGetter(0), ToTensorTS()])
t = pipe(item)
label = ItemGetter(1)(item)
t, t.shape, t.show(title=label)
(TensorTS([[-0.5147, -0.5186, -0.5213,  ..., -0.6287, -0.6326, -0.6065],
         [-1.8940, -1.8950, -1.8920,  ..., -1.9529, -1.9495, -1.9313],
         [-0.7490, -0.7383, -0.7287,  ..., -0.6161, -0.6443, -0.6673],
         ...,
         [ 0.4397,  0.4380,  0.4334,  ...,  0.5668,  0.5064,  0.5925],
         [-1.7014, -1.7506, -1.6242,  ..., -1.6601, -1.9693, -1.6339],
         [-0.8094, -0.8248, -0.7892,  ..., -0.5281, -0.4773, -0.5100]]),
 torch.Size([24, 51]),
 None)

Min and Max for all_samples or all_samples_per_channel

scale_subtype = 'all_samples_per_channel'  # 'all_samples' or 'all_samples_per_channel'
min, max = get_min_max(data.x, scale_subtype=scale_subtype)
min.shape, max.shape, min, max
(torch.Size([24, 51]),
 torch.Size([24, 51]),
 tensor([[-3.0796, -3.0796, -3.0796,  ..., -3.0796, -3.0796, -3.0796],
         [-2.5026, -2.5026, -2.5026,  ..., -2.5026, -2.5026, -2.5026],
         [-3.6054, -3.6054, -3.6054,  ..., -3.6054, -3.6054, -3.6054],
         ...,
         [-1.3093, -1.3093, -1.3093,  ..., -1.3093, -1.3093, -1.3093],
         [-2.5020, -2.5020, -2.5020,  ..., -2.5020, -2.5020, -2.5020],
         [-2.7139, -2.7139, -2.7139,  ..., -2.7139, -2.7139, -2.7139]]),
 tensor([[1.7981, 1.7981, 1.7981,  ..., 1.7981, 1.7981, 1.7981],
         [1.4069, 1.4069, 1.4069,  ..., 1.4069, 1.4069, 1.4069],
         [1.3102, 1.3102, 1.3102,  ..., 1.3102, 1.3102, 1.3102],
         ...,
         [2.6948, 2.6948, 2.6948,  ..., 2.6948, 2.6948, 2.6948],
         [2.1119, 2.1119, 2.1119,  ..., 2.1119, 2.1119, 2.1119],
         [2.3708, 2.3708, 2.3708,  ..., 2.3708, 2.3708, 2.3708]]))

Test All the Normalization types

tfm_norm = Normalize(min=min, max=max, scale_subtype = 'all_samples', scale_range=(0, 1), cuda=False)
item_norm = tfm_norm(t)
test_eq_tensor(t, tfm_norm.decodes(item_norm))
tfm_norm = Normalize(min=min, max=max, scale_subtype = 'all_samples_per_channel', scale_range=(0, 1), cuda=False)
item_norm = tfm_norm(t)
test_eq_tensor(t, tfm_norm.decodes(item_norm))
# tfm_norm = Normalize(min=min, max=max, scale_subtype = 'per_sample_per_channel', scale_range=(0, 1), cuda=False)
tfm_norm = Normalize(scale_subtype = 'per_sample', scale_range=(0, 1), cuda=False)
item_norm = tfm_norm(t)
test_eq_tensor(t, tfm_norm.decodes(item_norm))
tfm_norm = Normalize(scale_subtype = 'per_sample_per_channel', scale_range=(0, 1), cuda=False)
item_norm = tfm_norm(t)
test_eq_tensor(t, tfm_norm.decodes(item_norm))

Test All the Standardization types

tfm_norm = Standardize(mean=mean, std=std, scale_subtype = 'all_samples', cuda=False)
item_norm = tfm_norm(t)
test_eq_tensor(t, tfm_norm.decodes(item_norm))
tfm_norm = Standardize(mean=mean, std=std, scale_subtype = 'all_samples_per_channel', cuda=False)
item_norm = tfm_norm(t)
test_eq_tensor(t, tfm_norm.decodes(item_norm))
tfm_norm = Standardize(scale_subtype = 'per_sample', cuda=False)
item_norm = tfm_norm(t)
test_eq_tensor(t, tfm_norm.decodes(item_norm))
tfm_norm = Standardize(scale_subtype = 'per_sample_per_channel', cuda=False)
item_norm = tfm_norm(t)
test_eq_tensor(t, tfm_norm.decodes(item_norm))