
This will materialize a lazy_tensor() or a data.frame() / list() containing -- among other things -- lazy_tensor() columns. That is, the data described in the underlying DataDescriptors is loaded for the indices in the lazy_tensor(), preprocessed, and then moved onto the specified device. Because not all elements of a lazy tensor need to have the same shape, a list of tensors is returned by default. If all elements have the same shape, the tensors can also be combined via rbind into a single tensor (parameter rbind).
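
A minimal sketch of the device handling (the "cuda" device and the torch::cuda_is_available() guard below are illustrative; "cpu" is the default):

lt = as_lazy_tensor(torch_randn(2, 3))
# materialize on the CPU (the default device)
materialize(lt, device = "cpu", rbind = TRUE)
# move the result onto the GPU instead, if one is available
if (torch::cuda_is_available()) {
  materialize(lt, device = "cuda", rbind = TRUE)
}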

Usage

materialize(x, device = "cpu", rbind = FALSE, ...)

# S3 method for list
materialize(x, device = "cpu", rbind = FALSE, cache = "auto", ...)

Arguments

x

(any)
The object to materialize. Either a lazy_tensor or a list() / data.frame() containing lazy_tensor columns.

device

(character(1))
The torch device.

rbind

(logical(1))
Whether to rbind the lazy tensor columns (TRUE) or return them as a list of tensors (FALSE). In the latter case, the tensors have no batch dimension.

...

(any)
Additional arguments.

cache

(character(1) or environment() or NULL)
Optional cache for (intermediate) materialization results. By default, caching is enabled when the same dataset or data descriptor (with a different output pointer) is used for more than one lazy tensor column.
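
As a sketch of supplying an explicit cache environment instead of relying on "auto" (reusing the same lazy tensor for both list entries is only for illustration, so that both entries share one underlying dataset):

lt = as_lazy_tensor(torch_randn(10, 3))
cache_env = new.env()
# both entries are backed by the same dataset, so intermediate
# results can be reused via the shared cache environment
materialize(list(a = lt, b = lt), cache = cache_env)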

Value

(torch_tensor or a list() of torch_tensors)
For a lazy_tensor input, a single torch_tensor (rbind = TRUE) or a list of torch_tensors (rbind = FALSE); for data.frame() / list() input, a named list of these results.

Details

Materializing a lazy tensor consists of:

  1. Loading the data from the internal dataset of the DataDescriptor.

  2. Processing this data in the preprocessing Graph.

  3. Returning the result of the PipeOp that the DataDescriptor's pointer refers to.

With multiple lazy_tensor columns, caching can be beneficial because:

  a) Output(s) from the dataset might be input to multiple graphs.

  b) Different lazy tensors might be outputs from the same graph.

For this reason it is possible to provide a cache environment. The hash key for a) is the hash of the indices and the dataset; the hash key for b) is the hash of the indices, the dataset, and the preprocessing graph.

Examples

lt1 = as_lazy_tensor(torch_randn(10, 3))
materialize(lt1, rbind = TRUE)
#> torch_tensor
#> -0.6321  1.1257 -0.6134
#> -1.4116  0.5588  0.2612
#> -1.3462  0.8896 -0.8164
#>  1.5439 -0.4825 -0.1642
#>  0.8312  1.4684  0.0010
#>  0.0054 -1.1388 -0.6080
#> -2.0798  1.4014 -0.4132
#>  0.3418  0.2732 -0.2309
#> -0.5310  1.5148  0.5543
#> -0.9964 -1.0038  0.2355
#> [ CPUFloatType{10,3} ]
materialize(lt1, rbind = FALSE)
#> [[1]]
#> torch_tensor
#> -0.6321
#>  1.1257
#> -0.6134
#> [ CPUFloatType{3} ]
#> 
#> [[2]]
#> torch_tensor
#> -1.4116
#>  0.5588
#>  0.2612
#> [ CPUFloatType{3} ]
#> 
#> [[3]]
#> torch_tensor
#> -1.3462
#>  0.8896
#> -0.8164
#> [ CPUFloatType{3} ]
#> 
#> [[4]]
#> torch_tensor
#>  1.5439
#> -0.4825
#> -0.1642
#> [ CPUFloatType{3} ]
#> 
#> [[5]]
#> torch_tensor
#>  0.8312
#>  1.4684
#>  0.0010
#> [ CPUFloatType{3} ]
#> 
#> [[6]]
#> torch_tensor
#> 0.001 *
#>  5.4318
#> -1138.8277
#> -607.9853
#> [ CPUFloatType{3} ]
#> 
#> [[7]]
#> torch_tensor
#> -2.0798
#>  1.4014
#> -0.4132
#> [ CPUFloatType{3} ]
#> 
#> [[8]]
#> torch_tensor
#>  0.3418
#>  0.2732
#> -0.2309
#> [ CPUFloatType{3} ]
#> 
#> [[9]]
#> torch_tensor
#> -0.5310
#>  1.5148
#>  0.5543
#> [ CPUFloatType{3} ]
#> 
#> [[10]]
#> torch_tensor
#> -0.9964
#> -1.0038
#>  0.2355
#> [ CPUFloatType{3} ]
#> 
lt2 = as_lazy_tensor(torch_randn(10, 4))
d = data.table::data.table(lt1 = lt1, lt2 = lt2)
materialize(d, rbind = TRUE)
#> $lt1
#> torch_tensor
#> -0.6321  1.1257 -0.6134
#> -1.4116  0.5588  0.2612
#> -1.3462  0.8896 -0.8164
#>  1.5439 -0.4825 -0.1642
#>  0.8312  1.4684  0.0010
#>  0.0054 -1.1388 -0.6080
#> -2.0798  1.4014 -0.4132
#>  0.3418  0.2732 -0.2309
#> -0.5310  1.5148  0.5543
#> -0.9964 -1.0038  0.2355
#> [ CPUFloatType{10,3} ]
#> 
#> $lt2
#> torch_tensor
#>  0.5962 -1.0352  0.5103 -2.5386
#> -0.2638  0.2794 -1.3635 -0.7627
#> -0.2461 -0.1206  1.7060  0.3797
#> -1.5901 -1.4695  2.4355 -1.1145
#>  0.3107 -0.2019 -0.1514  0.6885
#>  0.0324 -0.1345  0.4282  1.9258
#> -0.9561  0.0530  0.8580  0.4080
#> -0.6767 -1.2658  0.2860  0.0426
#>  0.3886 -0.2799 -0.5332 -0.5206
#> -1.8741  1.2591  1.9836 -1.2977
#> [ CPUFloatType{10,4} ]
#> 
materialize(d, rbind = FALSE)
#> $lt1
#> $lt1[[1]]
#> torch_tensor
#> -0.6321
#>  1.1257
#> -0.6134
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[2]]
#> torch_tensor
#> -1.4116
#>  0.5588
#>  0.2612
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[3]]
#> torch_tensor
#> -1.3462
#>  0.8896
#> -0.8164
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[4]]
#> torch_tensor
#>  1.5439
#> -0.4825
#> -0.1642
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[5]]
#> torch_tensor
#>  0.8312
#>  1.4684
#>  0.0010
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[6]]
#> torch_tensor
#> 0.001 *
#>  5.4318
#> -1138.8277
#> -607.9853
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[7]]
#> torch_tensor
#> -2.0798
#>  1.4014
#> -0.4132
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[8]]
#> torch_tensor
#>  0.3418
#>  0.2732
#> -0.2309
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[9]]
#> torch_tensor
#> -0.5310
#>  1.5148
#>  0.5543
#> [ CPUFloatType{3} ]
#> 
#> $lt1[[10]]
#> torch_tensor
#> -0.9964
#> -1.0038
#>  0.2355
#> [ CPUFloatType{3} ]
#> 
#> 
#> $lt2
#> $lt2[[1]]
#> torch_tensor
#>  0.5962
#> -1.0352
#>  0.5103
#> -2.5386
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[2]]
#> torch_tensor
#> -0.2638
#>  0.2794
#> -1.3635
#> -0.7627
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[3]]
#> torch_tensor
#> -0.2461
#> -0.1206
#>  1.7060
#>  0.3797
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[4]]
#> torch_tensor
#> -1.5901
#> -1.4695
#>  2.4355
#> -1.1145
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[5]]
#> torch_tensor
#>  0.3107
#> -0.2019
#> -0.1514
#>  0.6885
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[6]]
#> torch_tensor
#>  0.0324
#> -0.1345
#>  0.4282
#>  1.9258
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[7]]
#> torch_tensor
#> -0.9561
#>  0.0530
#>  0.8580
#>  0.4080
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[8]]
#> torch_tensor
#> -0.6767
#> -1.2658
#>  0.2860
#>  0.0426
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[9]]
#> torch_tensor
#>  0.3886
#> -0.2799
#> -0.5332
#> -0.5206
#> [ CPUFloatType{4} ]
#> 
#> $lt2[[10]]
#> torch_tensor
#> -1.8741
#>  1.2591
#>  1.9836
#> -1.2977
#> [ CPUFloatType{4} ]
#> 
#>