使用 pytorch 实现物理通知神经网络

问题描述 投票:0回答:2

我发现了一篇非常有趣的论文,物理信息深度学习(第一部分):非线性偏微分方程的数据驱动解决方案并且想尝试一下。为此,我创建了一个虚拟问题并实现了我从论文中理解的内容。

问题陈述

假设我想求解 ODE $dy/dx = \cos(x)$,初始条件为 $y(0) = y(2\pi) = 0$。其实我们很容易猜出解析解 $y(x) = \sin(x)$,但我想看看 PINN 模型如何预测这个解。

# import libraries
import torch
import torch.autograd as autograd # computation graph
import torch.nn as nn  # neural networks
import torch.optim as optim # optimizers e.g. gradient descent, ADAM, etc.
import matplotlib.pyplot as plt
import numpy as np

#Set default dtype to float32
torch.set_default_dtype(torch.float)
#PyTorch random number generator — fixed seed for reproducible weight init
torch.manual_seed(1234)
# Device configuration: prefer GPU when available, otherwise CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)

模型架构

## Model Architecture
class FCN(nn.Module):
    """Fully-connected network for the PINN.

    Parameters
    ----------
    layers : sequence of int
        Layer widths, e.g. [1, 50, 50, 1].
    """
    def __init__(self, layers):
        super().__init__()  # call __init__ from parent class
        # Tanh is smooth, so the higher-order autograd derivatives needed for
        # the PDE residual exist everywhere.
        self.activation = nn.Tanh()
        # loss function
        self.loss_function = nn.MSELoss(reduction='mean')
        # Initialise the network as a list of Linear layers using nn.ModuleList
        self.linears = nn.ModuleList(
            [nn.Linear(layers[i], layers[i + 1]) for i in range(len(layers) - 1)])
        self.iter = 0
        # Xavier Normal weight initialization, zero biases
        for linear in self.linears:
            nn.init.xavier_normal_(linear.weight.data, gain=1.0)
            nn.init.zeros_(linear.bias.data)

    # forward pass
    def forward(self, x):
        # Accept numpy arrays for convenience.
        if not torch.is_tensor(x):
            x = torch.from_numpy(x)
        a = x.float()
        # BUG FIX: the original looped `range(len(layers)-2)` over the *global*
        # `layers`, which skipped the last hidden layer and failed entirely if
        # that global was absent.  Iterate the module's own layer list instead.
        for linear in self.linears[:-1]:
            a = self.activation(linear(a))
        return self.linears[-1](a)

    # Loss Functions
    # Loss PDE: mean-squared residual of dy/dx - PDE(x) at collocation points
    def lossPDE(self, x_PDE):
        # NOTE(review): relies on a module-level `PDE(x)` (the RHS of the ODE,
        # here cos(x)) and a module-level `device` — confirm both are defined.
        g = x_PDE.clone()
        g.requires_grad = True  # enable differentiation w.r.t. the inputs
        f = self.forward(g)
        f_x = autograd.grad(f, g, torch.ones([x_PDE.shape[0], 1]).to(device),
                            retain_graph=True, create_graph=True)[0]
        loss_PDE = self.loss_function(f_x, PDE(g))
        return loss_PDE

    # BUG FIX: the training loop calls `model.loss_PDE(...)` while the method
    # is named `lossPDE` — keep both names valid for backward compatibility.
    loss_PDE = lossPDE

生成数据

# ---- problem setup ----------------------------------------------------
# BUG FIX: the original used the *builtins* `min`/`max` as numbers and the
# undefined names `total_points`/`lr`, which raises at runtime.  Define
# concrete values.  (We deliberately shadow min/max because the later
# prediction cell references them as the domain bounds.)
min = 0.0            # left end of the training domain
max = 2 * np.pi      # right end of the training domain
total_points = 200   # number of collocation points
lr = 1e-3            # Adam learning rate

def PDE(x):
    """Right-hand side of the ODE dy/dx = cos(x); target of the PINN residual."""
    return torch.cos(x)

# generate training and evaluation points
x = torch.linspace(min, max, total_points).view(-1, 1)
y = torch.sin(x)  # analytic solution, kept for reference/plotting only
print(x.shape, y.shape)

# No extra boundary constraint is needed for this problem: the endpoints
# coincide with collocation points and their values.
x_PDE = x[1:-1, :]
print(x_PDE.shape)

x_PDE = x_PDE.float().to(device)

# Create Model: 4 hidden layers of 50 units each
layers = np.array([1, 50, 50, 50, 50, 1])
model = FCN(layers)
print(model)
model.to(device)
params = list(model.parameters())
optimizer = torch.optim.Adam(model.parameters(), lr=lr, amsgrad=False)

训练神经网络

# Train: minimise the mean-squared PDE residual at the collocation points.
for i in range(500):
    optimizer.zero_grad()
    # BUG FIX: the method is named `lossPDE`; `model.loss_PDE` raised
    # AttributeError on the class as originally defined.
    loss = model.lossPDE(x_PDE)  # mean squared error of dy/dx - cos(x)
    loss.backward()
    optimizer.step()
    # report 10 times over the run
    if i % (500 // 10) == 0:
        print(loss)

使用 PINN 预测解决方案

# predict the solution beyond the training interval [min, max]
x = torch.linspace(0, max + max, total_points).view(-1, 1)
yh = model(x.to(device))
y = torch.sin(x)  # analytic reference
# BUG FIX: the class defines no `lossBC`; report the PDE residual loss instead.
print(model.lossPDE(x.to(device)))

y_plot = y.detach().numpy()
yh_plot = yh.detach().cpu().numpy()
fig, ax1 = plt.subplots()
ax1.plot(x, y_plot, color='blue', label='Real')
ax1.plot(x, yh_plot, color='red', label='Predicted')
ax1.set_xlabel('x', color='black')
ax1.set_ylabel('f(x)', color='black')
ax1.tick_params(axis='y', color='black')
ax1.legend(loc='upper left')

但最终的结果却令人失望。该模型无法学习简单的 ODE。我想知道我的模型架构可能有一些问题,我自己无法弄清楚。有人可以提出任何改进吗?

python deep-learning neural-network pytorch
2个回答
0
投票

检查您的代码后,我对测试数据集有一个疑问:我不确定预测结果不好的原因是否在于您没有调用 model.eval()。我不熟悉这种网络/模型,但根据我使用 CNN 和基本 GCN 的经验,我习惯在预测结果(图表上的橙色线)之前调用 model.eval()。

例如,如果我是你,我会这样做:

# Train/eval split of the original loop.
for i in range(500):
    model.train()
    yh = model(x_PDE)
    # BUG FIX: the method is named `lossPDE`, not `loss_PDE`.
    loss = model.lossPDE(x_PDE)  # use mean squared error
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if i % (500 / 10) == 0:
        print(loss)
        model.eval()
        # BUG FIX: the original `-- your test function ... --` placeholder was
        # a syntax error; keep it as a comment instead:
        # your test function, just like train but without backward/optimizer steps

我不确定这是否会影响你的答案


0
投票

真是巧合,我这些天一直在学习PINN。正如评论中所述,调试并指出模型中棘手的部分确实是复杂且耗时的。但是,我尝试自己解决你的问题。我在 PyTorch 上创建了一个模型,其中包含两个隐藏层,每个隐藏层有 64 个单元,每层后面都有

Tanh()
激活函数。我希望这有帮助:

class CreateDataset(torch.utils.data.Dataset):
    """Wrap an (inputs, targets) pair as an indexable torch Dataset."""

    def __init__(self, dataset):
        # `dataset` is a two-element sequence: [inputs, targets]
        self.x, self.y = dataset[0], dataset[1]
        self.length = self.x.shape[0]

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        return self.x[index], self.y[index]

# Hyper-parameters for the PINN training run.
PI = np.pi
SEED = 7
EPOCHS = 10000      # total optimisation epochs
BATCH_SIZE = 100    # batch size; also tiles the boundary points in the BC loss
torch.manual_seed(SEED)  # reproducible weight init and sampling

def initial_condition_loss(model, t):
    """Squared violation of the boundary conditions y(0) = y(2*pi) = 0.

    The `t` argument is unused; it is kept so all loss functions share one
    signature.  Uses the module-level BATCH_SIZE and PI.
    """
    left = torch.tensor(BATCH_SIZE * [[0.0]])
    right = torch.tensor(BATCH_SIZE * [[2 * PI]])
    return torch.square(model(left)) + torch.square(model(right))

def residual_Loss(model, t):
    """Pointwise squared ODE residual (du/dt - cos(t))^2 at the points `t`."""
    t.requires_grad_(True)
    u = model(t)
    # Summing before grad yields du/dt for every point in one autograd call.
    (u_t,) = torch.autograd.grad(torch.sum(u), t,
                                 retain_graph=True, create_graph=True)
    return torch.square(u_t - torch.cos(t))

def total_loss(model, t):
    """Scalar training loss: boundary-condition penalty plus ODE residual."""
    bc = initial_condition_loss(model, t)
    residual = residual_Loss(model, t)
    return torch.sum(bc + residual)

# 100 collocation points drawn uniformly from [0, 15] (in-place draw, so it
# consumes the seeded RNG state).
t = torch.empty((100, 1))
t.uniform_(0.0, 15.0)

# The physics loss ignores targets; pass t twice to satisfy the
# (inputs, targets) Dataset interface.
tt = CreateDataset([t, t])

tt_loader = torch.utils.data.DataLoader(tt, batch_size=BATCH_SIZE, shuffle=True)

def set_weight(weights):
    """Fill `weights` in place with Xavier-uniform values; return the tensor."""
    return torch.nn.init.xavier_uniform_(weights)

def set_bias(biases):
    """Zero-fill `biases` in place and return the same tensor."""
    return torch.nn.init.zeros_(biases)

def set_activation():
    """Factory for the network activation; Tanh keeps derivatives smooth."""
    return torch.nn.Tanh()

def _make_pinn_model():
    """1 -> 64 -> 64 -> 1 MLP with Tanh activations (as the answer describes),
    Xavier-uniform weights and zero biases."""
    net = torch.nn.Sequential(
        torch.nn.Linear(1, 64), torch.nn.Tanh(),
        torch.nn.Linear(64, 64), torch.nn.Tanh(),
        torch.nn.Linear(64, 1),
    )
    for layer in net:
        if isinstance(layer, torch.nn.Linear):
            torch.nn.init.xavier_uniform_(layer.weight)
            torch.nn.init.zeros_(layer.bias)
    return net

# BUG FIX: the original `model = # YOUR MODEL HERE!!!` placeholder was a
# syntax error; instantiate the network the answer text specifies.
model = _make_pinn_model()

optimizer = torch.optim.Adam(model.parameters())

def training(model, data_loader, epochs):
    """Optimise `model` on batches from `data_loader` for `epochs` epochs.

    Uses the module-level `optimizer` and `total_loss`; prints the mean
    batch loss every 1000 epochs.
    """
    model.train()
    for epoch in range(1, epochs + 1):
        running = 0.0
        for t, _ in data_loader:
            loss = total_loss(model, t)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            running += loss.item()
        running /= len(data_loader)
        if epoch % 1000 == 0:
            print(f'Epoch: {epoch} - Loss: {running}')

def prediction(model, dataset):
    """Evaluate `model` on `dataset` in eval mode and return its outputs."""
    model.eval()
    return model(dataset)

观察损失:

Epoch: 1000 - Loss: 0.997127115726471
Epoch: 2000 - Loss: 0.011104697361588478
Epoch: 3000 - Loss: 0.0016031203558668494
Epoch: 4000 - Loss: 0.0007361933821812272
Epoch: 5000 - Loss: 0.0004778479633387178
Epoch: 6000 - Loss: 0.00038485831464640796
Epoch: 7000 - Loss: 0.0003462266467977315
Epoch: 8000 - Loss: 0.0003259943041484803
Epoch: 9000 - Loss: 0.14281785488128662
Epoch: 10000 - Loss: 0.00029919209191575646

检查结果:

plot_data = torch.arange(0.0, 15.0, 0.001).reshape((-1, 1))

u = prediction(model, plot_data)

plt.plot(plot_data, u.detach(), 'bo', markersize=1.75)
plt.show()

Result

© www.soinside.com 2019 - 2024. All rights reserved.