import torch

# Define a simple HNN model: input (q, p) -> output H (the system energy)
class HNN(torch.nn.Module):
    def __init__(self, hidden_dim=32):
        super().__init__()
        self.net = torch.nn.Sequential(
            torch.nn.Linear(2, hidden_dim),
            torch.nn.Tanh(),
            torch.nn.Linear(hidden_dim, 1)  # output: scalar H
        )

    def forward(self, q, p):
        # Stack q and p to form the input vector and return H(q, p)
        return self.net(torch.stack([q, p], dim=-1)).squeeze(-1)

hnn = HNN(hidden_dim=16)
optimizer = torch.optim.Adam(hnn.parameters(), lr=0.01)

# Synthetic data: small oscillations of a mass-spring system (q: position, p: momentum)
t = torch.linspace(0, 10, steps=101)
q_data = torch.cos(t)
p_data = -torch.sin(t)
dq_data = torch.gradient(q_data, spacing=(t,))[0]  # true dq/dt = p for m=1
dp_data = torch.gradient(p_data, spacing=(t,))[0]  # true dp/dt = -q for k=1

# Training loop to enforce Hamilton's equations as the loss
for epoch in range(1000):
    idx = torch.randint(0, len(t), (32,))
    # Indexing returns fresh tensors; mark them as requiring grad so that
    # dH/dq and dH/dp can be taken with respect to the inputs
    q = q_data[idx].requires_grad_(True)
    p = p_data[idx].requires_grad_(True)
    H = hnn(q, p)

    # Compute gradients of H with respect to p and q via autograd
    dH_dp = torch.autograd.grad(H, p, grad_outputs=torch.ones_like(H),
                                retain_graph=True, create_graph=True)[0]
    dH_dq = torch.autograd.grad(H, q, grad_outputs=torch.ones_like(H),
                                retain_graph=True, create_graph=True)[0]

    dq_pred = dH_dp    # predicted dq/dt =  dH/dp
    dp_pred = -dH_dq   # predicted dp/dt = -dH/dq
    dq_true = dq_data[idx]
    dp_true = dp_data[idx]

    loss = torch.mean((dq_pred - dq_true)**2 + (dp_pred - dp_true)**2)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# Note: training only uses gradients of H, so the learned Hamiltonian is
# determined up to an additive constant; values may differ by a fixed offset.
q_test, p_test = 1.0, 0.0
with torch.no_grad():
    H_pred = hnn(torch.tensor(q_test), torch.tensor(p_test)).item()
print("Learned Hamiltonian at (q=1, p=0):", H_pred)
print("True Hamiltonian at (q=1, p=0):", 0.5 * q_test**2 + 0.5 * p_test**2)
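
# --- Optional usage sketch (not part of the training code above) ---
# One way to use the trained HNN is to roll out a trajectory by integrating
# Hamilton's equations with the learned H. The sketch below is illustrative
# only: the function name `rollout`, the explicit Euler step, and the step
# size are assumptions, not anything prescribed by the original example.
def rollout(model, q0, p0, dt=0.1, steps=100):
    q = torch.tensor(q0, requires_grad=True)
    p = torch.tensor(p0, requires_grad=True)
    traj = [(q.item(), p.item())]
    for _ in range(steps):
        H = model(q, p)
        dH_dq, dH_dp = torch.autograd.grad(H, (q, p))
        # Hamilton's equations: dq/dt = dH/dp, dp/dt = -dH/dq (explicit Euler step)
        q_next = (q + dt * dH_dp).detach().requires_grad_(True)
        p_next = (p - dt * dH_dq).detach().requires_grad_(True)
        q, p = q_next, p_next
        traj.append((q.item(), p.item()))
    return traj

traj = rollout(hnn, 1.0, 0.0)
print("Rollout start:", traj[0], "-> after", len(traj) - 1, "steps:", traj[-1])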