Ingress for a single lazy_tensor
column.
Parameters
shape
(integer())
The shape of the tensor, where the first dimension (batch) must be NA.
When it is not specified, the lazy tensor input column needs to have a known shape.
Input and Output Channels
One input channel called "input"
and one output channel called "output"
.
For an explanation see PipeOpTorch
.
See also
Other PipeOps:
mlr_pipeops_nn_avg_pool1d
,
mlr_pipeops_nn_avg_pool2d
,
mlr_pipeops_nn_avg_pool3d
,
mlr_pipeops_nn_batch_norm1d
,
mlr_pipeops_nn_batch_norm2d
,
mlr_pipeops_nn_batch_norm3d
,
mlr_pipeops_nn_block
,
mlr_pipeops_nn_celu
,
mlr_pipeops_nn_conv1d
,
mlr_pipeops_nn_conv2d
,
mlr_pipeops_nn_conv3d
,
mlr_pipeops_nn_conv_transpose1d
,
mlr_pipeops_nn_conv_transpose2d
,
mlr_pipeops_nn_conv_transpose3d
,
mlr_pipeops_nn_dropout
,
mlr_pipeops_nn_elu
,
mlr_pipeops_nn_flatten
,
mlr_pipeops_nn_gelu
,
mlr_pipeops_nn_glu
,
mlr_pipeops_nn_hardshrink
,
mlr_pipeops_nn_hardsigmoid
,
mlr_pipeops_nn_hardtanh
,
mlr_pipeops_nn_head
,
mlr_pipeops_nn_layer_norm
,
mlr_pipeops_nn_leaky_relu
,
mlr_pipeops_nn_linear
,
mlr_pipeops_nn_log_sigmoid
,
mlr_pipeops_nn_max_pool1d
,
mlr_pipeops_nn_max_pool2d
,
mlr_pipeops_nn_max_pool3d
,
mlr_pipeops_nn_merge
,
mlr_pipeops_nn_merge_cat
,
mlr_pipeops_nn_merge_prod
,
mlr_pipeops_nn_merge_sum
,
mlr_pipeops_nn_prelu
,
mlr_pipeops_nn_relu
,
mlr_pipeops_nn_relu6
,
mlr_pipeops_nn_reshape
,
mlr_pipeops_nn_rrelu
,
mlr_pipeops_nn_selu
,
mlr_pipeops_nn_sigmoid
,
mlr_pipeops_nn_softmax
,
mlr_pipeops_nn_softplus
,
mlr_pipeops_nn_softshrink
,
mlr_pipeops_nn_softsign
,
mlr_pipeops_nn_squeeze
,
mlr_pipeops_nn_tanh
,
mlr_pipeops_nn_tanhshrink
,
mlr_pipeops_nn_threshold
,
mlr_pipeops_nn_unsqueeze
,
mlr_pipeops_torch_ingress
,
mlr_pipeops_torch_ingress_categ
,
mlr_pipeops_torch_ingress_num
,
mlr_pipeops_torch_loss
,
mlr_pipeops_torch_model
,
mlr_pipeops_torch_model_classif
,
mlr_pipeops_torch_model_regr
Other Graph Network:
ModelDescriptor()
,
TorchIngressToken()
,
mlr_learners_torch_model
,
mlr_pipeops_module
,
mlr_pipeops_torch
,
mlr_pipeops_torch_ingress
,
mlr_pipeops_torch_ingress_categ
,
mlr_pipeops_torch_ingress_num
,
model_descriptor_to_learner()
,
model_descriptor_to_module()
,
model_descriptor_union()
,
nn_graph()
Super classes
mlr3pipelines::PipeOp
-> mlr3torch::PipeOpTorchIngress
-> PipeOpTorchIngressLazyTensor
Methods
Method new()
Creates a new instance of this R6 class.
Usage
PipeOpTorchIngressLazyTensor$new(
id = "torch_ingress_ltnsr",
param_vals = list()
)
Arguments
id
(
character(1)
)
Identifier of the resulting object.
param_vals
(
list()
)
List of hyperparameter settings, overwriting the hyperparameter settings that would otherwise be set during construction.
Examples
po_ingress = po("torch_ingress_ltnsr")
task = tsk("lazy_iris")
md = po_ingress$train(list(task))[[1L]]
ingress = md$ingress
x_batch = ingress[[1L]]$batchgetter(data = task$data(1, "x"), device = "cpu", cache = NULL)
x_batch
#> torch_tensor
#> 5.1000 3.5000 1.4000 0.2000
#> [ CPUFloatType{1,4} ]
# Now we try a lazy tensor with unknown shape, i.e. the shapes between the rows can differ
ds = dataset(
initialize = function() self$x = list(torch_randn(3, 10, 10), torch_randn(3, 8, 8)),
.getitem = function(i) list(x = self$x[[i]]),
.length = function() 2)()
task_unknown = as_task_regr(data.table(
x = as_lazy_tensor(ds, dataset_shapes = list(x = NULL)),
y = rnorm(2)
), target = "y", id = "example2")
# this task (as it is) can NOT be processed by PipeOpTorchIngressLazyTensor
# It therefore needs to be preprocessed
po_resize = po("trafo_resize", size = c(6, 6))
task_unknown_resize = po_resize$train(list(task_unknown))[[1L]]
# printing the transformed column still shows unknown shapes,
# because the preprocessing pipeop cannot infer them,
# however we know that the shape is now (3, 6, 6) for all rows
task_unknown_resize$data(1:2, "x")
#> x
#> <lazy_tensor>
#> 1: <tnsr[]>
#> 2: <tnsr[]>
po_ingress$param_set$set_values(shape = c(NA, 3, 6, 6))
md2 = po_ingress$train(list(task_unknown_resize))[[1L]]
ingress2 = md2$ingress
x_batch2 = ingress2[[1L]]$batchgetter(
data = task_unknown_resize$data(1:2, "x"),
device = "cpu",
cache = NULL
)
x_batch2
#> torch_tensor
#> (1,1,.,.) =
#> -0.2078 -0.1131 -1.6323 -0.2804 -0.9031 -0.5297
#> -1.1554 -0.1849 0.4042 1.5520 1.3144 0.0003
#> -0.2729 0.3735 -0.7645 0.1629 1.0660 -0.3381
#> 0.2746 0.5842 -0.2112 -0.1229 0.3845 -0.1859
#> -1.0791 0.0709 -0.3241 -1.5738 0.4490 -0.9358
#> 0.9194 0.0116 -0.1817 0.1471 0.7634 -0.2087
#>
#> (2,1,.,.) =
#> 0.1565 -0.8394 0.6526 0.1929 0.4309 -0.0917
#> 0.0937 0.0723 0.7762 -0.9165 0.0850 -0.3978
#> 0.1873 -0.4686 0.2160 -0.7938 -0.3049 -1.2551
#> 0.4127 -0.1960 0.3186 -0.9407 0.3018 -0.9580
#> -0.2428 0.0559 0.1596 -0.2397 -0.3844 -0.4555
#> 0.1110 -0.3334 -1.5132 -0.5237 -0.4017 -0.8347
#>
#> (1,2,.,.) =
#> 0.6009 -0.7224 0.5385 -0.7747 -0.3309 0.3409
#> -2.0009 -1.0333 -0.2406 0.1875 0.0271 0.1803
#> 0.4144 -1.3590 0.9025 -0.1537 1.0076 0.4751
#> 0.2995 0.1819 0.6950 -0.0064 -1.0788 -0.0101
#> 0.8651 1.8329 -0.3954 -0.9614 1.4065 0.0720
#> -0.4567 0.5490 -0.3825 -0.1049 1.3999 0.5676
#>
#> (2,2,.,.) =
#> 1.7634 0.3604 0.1907 0.0011 -0.6949 -0.6822
#> 0.2982 0.1261 -0.5402 0.0859 -0.6995 0.8001
#> -0.5016 -0.3856 -1.0377 -0.3096 0.2825 0.1058
#> -0.1890 -0.8383 0.0932 -0.5084 0.2517 -0.6923
#> 0.7076 0.1294 0.4360 -0.6302 -0.3922 -0.0934
#> ... [the output was truncated (use n=-1 to disable)]
#> [ CPUFloatType{2,3,6,6} ]