PINN (physics-informed neural network) in PyTorch does not reproduce the correct ODE solution

Problem description

I have the following ODE:

df/dt = A * stress^n * t^(m - 1),   with f(T0) = F0
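
For reference (my addition; it is not stated in the post but follows by direct integration, assuming m > 0), this equation has the closed-form solution

f(t) = A * stress^n * t^m / m

which the numeric solve_ivp solution further down should approximate.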

I want to train a PINN on it and use it for prediction. I came across this article and applied its ODE approach to my equation.

My implementation is as follows:

from typing import Callable
import matplotlib.pyplot as plt
import torch
from scipy.integrate import solve_ivp
from torch import nn
import numpy as np

class NNApproximator(nn.Module):
    def __init__(self, num_hidden: int, dim_hidden: int, act=nn.Tanh()):

        super().__init__()

        self.layer_in = nn.Linear(1, dim_hidden)
        self.layer_out = nn.Linear(dim_hidden, 1)

        num_middle = num_hidden - 1
        self.middle_layers = nn.ModuleList(
            [nn.Linear(dim_hidden, dim_hidden) for _ in range(num_middle)]
        )
        self.act = act

    def forward(self, x):
        out = self.act(self.layer_in(x))
        for layer in self.middle_layers:
            out = self.act(layer(out))
        return self.layer_out(out)


def f(nn: NNApproximator, x: torch.Tensor) -> torch.Tensor:
    """Compute the value of the approximate solution from the NN model"""
    return nn(x)


def df(nn: NNApproximator, x: torch.Tensor = None, order: int = 1) -> torch.Tensor:
    """Compute neural network derivative with respect to input features using PyTorch autograd engine"""
    df_value = f(nn, x)
    for _ in range(order):
        df_value = torch.autograd.grad(
            df_value,
            x,
            grad_outputs=torch.ones_like(x),
            create_graph=True,
            retain_graph=True,
        )[0]

    return df_value

A = 4.489118023741378e-09
n = 2.27328529739465
m = 0.3010926429016967
stress = 105.0
T0 = 0 # initial time
TF = 1000.0
F0 = 0.0 # initial boundary condition value

def compute_loss(
    nn: NNApproximator, x: torch.Tensor = None, verbose: bool = False
) -> torch.float:
    """Compute the full loss function as interior loss + boundary loss
    This custom loss function is fully defined with differentiable tensors therefore
    the .backward() method can be applied to it
    """

    interior_loss = df(nn, x) - (A * stress**n * x**(m - 1))

    boundary = torch.Tensor([T0])
    boundary.requires_grad = True
    boundary_loss = f(nn, boundary) - F0
    final_loss = interior_loss.pow(2).mean() + boundary_loss ** 2
    return final_loss


def train_model(
    nn: NNApproximator,
    loss_fn: Callable,
    learning_rate: float = 0.01,
    max_epochs: int = 1_000,
) -> tuple[NNApproximator, np.ndarray]:

    loss_evolution = []

    optimizer = torch.optim.SGD(nn.parameters(), lr=learning_rate)
    for epoch in range(max_epochs):

        try:

            loss: torch.Tensor = loss_fn(nn)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if epoch % 1000 == 0:
                print(f"Epoch: {epoch} - Loss: {float(loss):>7f}")

            loss_evolution.append(loss.detach().numpy())

        except KeyboardInterrupt:
            break

    return nn, np.array(loss_evolution)


from functools import partial

domain = [T0, TF]
x = torch.logspace(np.log10(domain[0]), np.log10(domain[1]), steps=1000, requires_grad=True)
x = x.reshape(x.shape[0], 1)

nn_approximator = NNApproximator(4, 10)

# train the PINN
loss_fn = partial(compute_loss, x=x, verbose=True)
nn_approximator_trained, loss_evolution = train_model(
    nn_approximator, loss_fn=loss_fn, learning_rate=0.1, max_epochs=5_000
)
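
Side note (my addition, not part of the original question): a quick way to confirm that the df helper above really returns the derivative of the network output with respect to its input is to compare it against a central finite difference on a small, untrained network:

check_net = NNApproximator(2, 8)                      # small throwaway network
x_check = torch.tensor([[0.5]], requires_grad=True)   # single input point
autograd_deriv = df(check_net, x_check)               # derivative via torch.autograd.grad
eps = 1e-4
finite_diff = (f(check_net, x_check + eps) - f(check_net, x_check - eps)) / (2 * eps)
print(autograd_deriv.item(), finite_diff.item())      # the two numbers should agree closely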

Then I plot the results as follows:

x_eval = torch.linspace(domain[0], domain[1], steps=100).reshape(-1, 1)

# numeric solution
def eq_fn(x, y):
    return A * stress**n * x**(m - 1)

numeric_solution = solve_ivp(
    eq_fn, domain, [F0], t_eval=x_eval.squeeze().detach().numpy()
)

x = torch.linspace(domain[0], domain[1], steps=100, requires_grad=True)
x = x.reshape(x.shape[0], 1)

# plotting
fig, ax = plt.subplots()

f_final_training = f(nn_approximator_trained, x)
f_final = f(nn_approximator_trained, x_eval)

ax.scatter(x.detach().numpy(), f_final_training.detach().numpy(), label="Training points", color="red")
ax.plot(x_eval.detach().numpy(), f_final.detach().numpy(), label="NN final solution")
ax.plot(
    x_eval.detach().numpy(),
    numeric_solution.y.T,
    label=f"Analytic solution",
    color="green",
    alpha=0.75,
)
ax.set(title="Equation solved with NNs", xlabel="t", ylabel="f(t)")
ax.legend()

fig, ax = plt.subplots()
ax.semilogy(loss_evolution)
ax.set(title="Loss evolution", xlabel="# epochs", ylabel="Loss")
ax.legend()

plt.show()

However, I get this result:

The PINN produces a different curve than the ODE solution. Can someone point out what I am doing wrong in constructing the PINN, since it does not reproduce the same behavior as the ODE?

deep-learning neural-network pytorch ode
1 Answer

Try setting retain_graph to None (instead of True) in torch.autograd.grad, while keeping create_graph equal to True.
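
A minimal sketch of what that suggestion looks like when applied to the df helper from the question (everything else left unchanged):

def df(nn: NNApproximator, x: torch.Tensor = None, order: int = 1) -> torch.Tensor:
    """Compute the derivative of the network output with respect to its input."""
    df_value = f(nn, x)
    for _ in range(order):
        df_value = torch.autograd.grad(
            df_value,
            x,
            grad_outputs=torch.ones_like(x),
            create_graph=True,
            retain_graph=None,  # default: PyTorch falls back to the value of create_graph
        )[0]
    return df_value

The only difference from the question's version is the retain_graph argument.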
