backward {tfNeuralODE}    R Documentation

Backward pass of the Neural ODE

Description

Computes the backward pass of the Neural ODE, returning the gradients needed to train the model via the adjoint method.
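The backward pass follows the adjoint sensitivity method of Chen et al. (2018), "Neural Ordinary Differential Equations". As a sketch of the general method (not necessarily the package's exact internals), with hidden state z(t), dynamics f, and weights \theta, the adjoint state a(t) = \partial L / \partial z(t) is integrated backwards in time,

  \frac{da(t)}{dt} = -a(t)^\top \frac{\partial f(z(t), t, \theta)}{\partial z},

and the weight gradients are recovered as

  \frac{dL}{d\theta} = -\int_{t_1}^{t_0} a(t)^\top \frac{\partial f(z(t), t, \theta)}{\partial \theta} \, dt.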

Usage

backward(model, tsteps, outputs, output_gradients = NULL)

Arguments

model

A keras neural network that defines the Neural ODE.

tsteps

A vector of the time steps at which the Neural ODE is solved to reach the final solution.

outputs

The tensor outputs of the forward pass of the Neural ODE.

output_gradients

The tensor gradients of the loss function with respect to the outputs of the forward pass (see the sketch below).
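
output_gradients is typically obtained by differentiating the loss against the forward-pass outputs with a gradient tape. A minimal sketch, assuming pred holds the forward-pass outputs and true_y a target trajectory (both hypothetical names, as in the Examples below):

with(tf$GradientTape() %as% tape, {
  tape$watch(pred)
  loss <- tf$reduce_mean(tf$abs(pred - true_y))
})
output_gradients <- tape$gradient(loss, pred)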

Value

A list containing:

The model input at the last time step.

The gradient of the loss with respect to the inputs, for use with the adjoint method.

The gradients of the loss with respect to the weights of the Neural ODE.
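
The three elements can be unpacked by position; a brief sketch, with list_w as an assumed name for the returned list and the other names as in the Examples below:

list_w <- backward(model, tsteps, outputs, output_gradients = dLoss)
inp_last <- list_w[[1]]  # model input at the last time step
dL_dinp <- list_w[[2]]   # gradient of the loss w.r.t. the inputs (adjoint method)
dL_dw <- list_w[[3]]     # gradients of the loss w.r.t. the model weights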

Examples


# the example requires TensorFlow to be available to reticulate
stopifnot(reticulate::py_module_available("tensorflow"))

library(tensorflow)
library(keras)
library(reticulate)
library(tfNeuralODE)

# example code
# single training example: define the ODE dynamics as a small Keras model
OdeModel(keras$Model) %py_class% {
 initialize <- function() {
   super$initialize()
   self$block_1 <- layer_dense(units = 50, activation = 'tanh')
   self$block_2 <- layer_dense(units = 2, activation = 'linear')
 }

 call <- function(inputs) {
   x <- inputs ^ 3
   x <- self$block_1(x)
   self$block_2(x)
 }
}
tsteps <- seq(0, 2.5, by = 2.5 / 10)
true_y0 <- t(c(2, 0))
model <- OdeModel()
optimizer <- tf$keras$optimizers$legacy$Adam(learning_rate = 1e-3)
# single training iteration
pred <- forward(model, true_y0, tsteps)
# target trajectory for the loss; a zero placeholder with the same shape
# as pred (assumption: in practice this is the observed trajectory)
true_y <- tf$zeros_like(pred)
with(tf$GradientTape() %as% tape, {
  tape$watch(pred)
  loss <- tf$reduce_mean(tf$abs(pred - true_y))
})
dLoss <- tape$gradient(loss, pred)
list_w <- backward(model, tsteps, pred, output_gradients = dLoss)
# list_w[[3]] holds the gradients of the loss w.r.t. the model weights
optimizer$apply_gradients(zip_lists(list_w[[3]], model$trainable_variables))
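
# The single iteration above can be repeated to train the model; a minimal
# sketch under the same assumptions (zero placeholder target, full tsteps):
for (iter in 1:100) {
  pred <- forward(model, true_y0, tsteps)
  with(tf$GradientTape() %as% tape, {
    tape$watch(pred)
    loss <- tf$reduce_mean(tf$abs(pred - true_y))
  })
  dLoss <- tape$gradient(loss, pred)
  list_w <- backward(model, tsteps, pred, output_gradients = dLoss)
  optimizer$apply_gradients(zip_lists(list_w[[3]], model$trainable_variables))
}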


[Package tfNeuralODE version 0.1.0 Index]