
This materializes a lazy_tensor() or a data.frame() / list() containing, among other things, lazy_tensor() columns. That is, the data described in the underlying DataDescriptors is loaded for the indices in the lazy_tensor(), preprocessed, and then moved to the specified device. Because not all elements of a lazy tensor need to have the same shape, a list of tensors is returned by default. If all elements do have the same shape, the tensors can also be rbind-ed into a single tensor (parameter rbind).

Usage

materialize(x, device = "cpu", rbind = FALSE, ...)

# S3 method for class 'list'
materialize(x, device = "cpu", rbind = FALSE, cache = "auto", ...)

Arguments

x

(any)
The object to materialize. Either a lazy_tensor or a list() / data.frame() containing lazy_tensor columns.

device

(character(1))
The torch device.

rbind

(logical(1))
Whether to rbind the lazy tensor columns (TRUE) or return them as a list of tensors (FALSE). In the latter case, the tensors have no batch dimension.

...

(any)
Additional arguments.

cache

(character(1) or environment() or NULL)
Optional cache for (intermediate) materialization results. By default ("auto"), caching is enabled when the same dataset or data descriptor (with a different output pointer) is used for more than one lazy tensor column.

Value

(torch_tensor or a list() of torch_tensors; for a list() / data.frame() input, a list containing the materialized columns)

Details

Materializing a lazy tensor consists of:

  1. Loading the data batches from the internal dataset of the DataDescriptor.

  2. Processing these batches in the preprocessing Graph.

  3. Returning the result of the PipeOp that the DataDescriptor's pointer refers to.
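
To make these steps concrete, the following sketch builds a lazy tensor from scratch. It assumes the DataDescriptor$new(dataset, dataset_shapes) constructor and the lazy_tensor() constructor as exported by mlr3torch, with NA in a shape marking the batch dimension; treat it as an illustration rather than the canonical construction.

library(torch)
library(mlr3torch)

# a torch dataset whose $.getitem() returns a named list of tensors
ds = dataset(
  initialize = function() {
    self$x = torch_randn(10, 3)
  },
  .getitem = function(i) {
    list(x = self$x[i, ])
  },
  .length = function() {
    10
  }
)()

# step 1: the DataDescriptor records the dataset and its shapes
dd = DataDescriptor$new(ds, dataset_shapes = list(x = c(NA, 3)))

# a lazy tensor referencing elements 1, 3 and 5 of the dataset
lt = lazy_tensor(dd, ids = c(1, 3, 5))

# steps 2 and 3: the (here trivial) preprocessing graph is applied and the
# output of the PipeOp referenced by the pointer is returned
materialize(lt, rbind = TRUE)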

With multiple lazy_tensor columns we can benefit from caching because:

  a) Output(s) from the dataset might be the input to multiple graphs.

  b) Different lazy tensors might be outputs of the same graph.

For this reason it is possible to provide a cache environment. The hash key for a) is the hash of the indices and the dataset; the hash key for b) is the hash of the indices, the dataset, and the preprocessing graph.
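
As a sketch of this, a cache environment can also be passed explicitly instead of relying on the "auto" detection. The two columns below (the names a and b are arbitrary) share the same lazy tensor, so both cases apply and the cached results are reused rather than recomputed:

lt = as_lazy_tensor(torch_randn(10, 3))

# one shared cache environment for both lazy tensor columns
materialize(list(a = lt, b = lt), cache = new.env(), rbind = TRUE)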

Examples

lt1 = as_lazy_tensor(torch_randn(10, 3))
materialize(lt1, rbind = TRUE)
#> torch_tensor
#>  2.3937 -0.6552  0.5912
#> -0.6402 -0.2214  0.0688
#>  0.3112 -1.3514 -0.2221
#> -1.0521  1.4368 -0.2947
#> -0.0088  1.8650 -0.2726
#> -0.3050  1.4418 -1.4295
#>  0.1936 -0.2371  0.3523
#> -0.9092  2.3197  1.5285
#>  0.6236 -0.3492  0.1090
#> -1.4876  0.0125  0.0976
#> [ CPUFloatType{10,3} ]
materialize(lt1, rbind = FALSE)
#> [[1]]
#> torch_tensor
#>  2.3937
#> -0.6552
#>  0.5912
#> [ CPUFloatType{3} ]
#> 
#> [[2]]
#> torch_tensor
#> -0.6402
#> -0.2214
#>  0.0688
#> [ CPUFloatType{3} ]
#> 
#> [[3]]
#> torch_tensor
#>  0.3112
#> -1.3514
#> -0.2221
#> [ CPUFloatType{3} ]
#> 
#> [[4]]
#> torch_tensor
#> -1.0521
#>  1.4368
#> -0.2947
#> [ CPUFloatType{3} ]
#> 
#> [[5]]
#> torch_tensor
#> -0.0088
#>  1.8650
#> -0.2726
#> [ CPUFloatType{3} ]
#> 
#> [[6]]
#> torch_tensor
#> -0.3050
#>  1.4418
#> -1.4295
#> [ CPUFloatType{3} ]
#> 
#> [[7]]
#> torch_tensor
#>  0.1936
#> -0.2371
#>  0.3523
#> [ CPUFloatType{3} ]
#> 
#> [[8]]
#> torch_tensor
#> -0.9092
#>  2.3197
#>  1.5285
#> [ CPUFloatType{3} ]
#> 
#> [[9]]
#> torch_tensor
#>  0.6236
#> -0.3492
#>  0.1090
#> [ CPUFloatType{3} ]
#> 
#> [[10]]
#> torch_tensor
#> -1.4876
#>  0.0125
#>  0.0976
#> [ CPUFloatType{3} ]
#> 
lt2 = as_lazy_tensor(torch_randn(10, 4))
d = data.table::data.table(lt1 = lt1, lt2 = lt2)
materialize(d, rbind = TRUE)
#> $lt1
#> torch_tensor
#>  2.3937 -0.6552  0.5912
#> -0.6402 -0.2214  0.0688
#>  0.3112 -1.3514 -0.2221
#> -1.0521  1.4368 -0.2947
#> -0.0088  1.8650 -0.2726
#> -0.3050  1.4418 -1.4295
#>  0.1936 -0.2371  0.3523
#> -0.9092  2.3197  1.5285
#>  0.6236 -0.3492  0.1090
#> -1.4876  0.0125  0.0976
#> [ CPUFloatType{10,3} ]
#> 
#> $lt2
#> torch_tensor
#> -0.1832 -0.0016  0.2789 -0.3613
#> -1.0369  0.8124 -1.0232 -0.9276
#>  0.9368  0.3896 -2.3135  0.0016
#>  0.4726  0.7433  0.3275 -1.5877
#>  1.1388 -0.9015  1.4931  0.7154
#>  0.9215 -1.5595  1.2522  0.6361
#> -2.1116 -0.7641 -0.9406 -0.5185
#>  2.9249 -2.9686 -0.8731 -1.9139
#> -0.6206 -0.5168  0.6367 -1.5128
#>  0.0437  0.1406  1.7150  1.0964
#> [ CPUFloatType{10,4} ]
#> 
materialize(d, rbind = FALSE)
#> $lt1
#> $lt1[[1]]
#> torch_tensor
#>  2.3937
#> -0.6552
#>  0.5912
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[2]]
#> torch_tensor
#> -0.6402
#> -0.2214
#>  0.0688
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[3]]
#> torch_tensor
#>  0.3112
#> -1.3514
#> -0.2221
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[4]]
#> torch_tensor
#> -1.0521
#>  1.4368
#> -0.2947
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[5]]
#> torch_tensor
#> -0.0088
#>  1.8650
#> -0.2726
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[6]]
#> torch_tensor
#> -0.3050
#>  1.4418
#> -1.4295
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[7]]
#> torch_tensor
#>  0.1936
#> -0.2371
#>  0.3523
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[8]]
#> torch_tensor
#> -0.9092
#>  2.3197
#>  1.5285
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[9]]
#> torch_tensor
#>  0.6236
#> -0.3492
#>  0.1090
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[10]]
#> torch_tensor
#> -1.4876
#>  0.0125
#>  0.0976
#> [ CPUFloatType{3} ]
#> 
#> 
#> $lt2
#> $lt2[[1]]
#> torch_tensor
#> -0.1832
#> -0.0016
#>  0.2789
#> -0.3613
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[2]]
#> torch_tensor
#> -1.0369
#>  0.8124
#> -1.0232
#> -0.9276
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[3]]
#> torch_tensor
#>  0.9368
#>  0.3896
#> -2.3135
#>  0.0016
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[4]]
#> torch_tensor
#>  0.4726
#>  0.7433
#>  0.3275
#> -1.5877
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[5]]
#> torch_tensor
#>  1.1388
#> -0.9015
#>  1.4931
#>  0.7154
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[6]]
#> torch_tensor
#>  0.9215
#> -1.5595
#>  1.2522
#>  0.6361
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[7]]
#> torch_tensor
#> -2.1116
#> -0.7641
#> -0.9406
#> -0.5185
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[8]]
#> torch_tensor
#>  2.9249
#> -2.9686
#> -0.8731
#> -1.9139
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[9]]
#> torch_tensor
#> -0.6206
#> -0.5168
#>  0.6367
#> -1.5128
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[10]]
#> torch_tensor
#>  0.0437
#>  0.1406
#>  1.7150
#>  1.0964
#> [ CPUFloatType{4} ]
#> 
#>