import torch

# Known parameters for a damped oscillator: m*ddx + c*dx + k*x = 0, with m=1, k=1, c=0.1
c, k = 0.1, 1.0

# Neural network representing x(t)
model = torch.nn.Sequential(
    torch.nn.Linear(1, 20),
    torch.nn.Tanh(),
    torch.nn.Linear(20, 1)
)

# Collocation points in the time domain
t_colloc = torch.linspace(0, 10, steps=100, requires_grad=True).reshape(-1, 1)

optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

for epoch in range(1000):
    x_pred = model(t_colloc)

    # First and second derivatives with respect to time via autograd;
    # create_graph=True (which implies retain_graph) keeps the derivative
    # graph so the loss can later be backpropagated through it
    dx_pred = torch.autograd.grad(x_pred, t_colloc,
                                  grad_outputs=torch.ones_like(x_pred),
                                  create_graph=True)[0]
    ddx_pred = torch.autograd.grad(dx_pred, t_colloc,
                                   grad_outputs=torch.ones_like(dx_pred),
                                   create_graph=True)[0]

    # ODE residual: ddx + c*dx + k*x should vanish at every collocation point
    ode_residual = ddx_pred + c * dx_pred + k * x_pred
    phys_loss = torch.mean(ode_residual**2)

    # Enforce initial conditions x(0) = 1, dx(0) = 0. The derivative at t=0
    # must be taken with respect to the same tensor that was fed to the model,
    # so build one input tensor t0 and reuse it in both calls.
    t0 = torch.zeros(1, 1, requires_grad=True)
    x0_pred = model(t0)
    dx0_pred = torch.autograd.grad(x0_pred, t0,
                                   grad_outputs=torch.ones_like(x0_pred),
                                   create_graph=True)[0]
    ic_loss = (x0_pred - 1.0)**2 + dx0_pred**2

    # Weight the initial-condition term so it is not drowned out by the
    # physics residual; squeeze to a scalar so loss.backward() is valid
    loss = phys_loss + 100 * ic_loss.squeeze()

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
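
# --- Optional sanity check (a sketch added here, not part of the training loop
# above): with m=1 and c**2 < 4k the oscillator is underdamped, and the
# closed-form solution satisfying x(0)=1, dx(0)=0 is
#     x(t) = exp(-c*t/2) * (cos(w*t) + (c/(2*w)) * sin(w*t)),  w = sqrt(k - c**2/4),
# so the trained network can be compared against it on a dense test grid.
with torch.no_grad():
    t_test = torch.linspace(0, 10, steps=500).reshape(-1, 1)
    w = (k - c**2 / 4) ** 0.5
    x_exact = torch.exp(-c * t_test / 2) * (
        torch.cos(w * t_test) + (c / (2 * w)) * torch.sin(w * t_test)
    )
    x_nn = model(t_test)
    print(f"max abs error vs analytical solution: {(x_nn - x_exact).abs().max().item():.4f}")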