PyTorch multi-layer (BP) neural network:
The model is as follows:
import torch
from torch import nn

class module_net(nn.Module):
    def __init__(self, num_input, num_hidden, num_output):
        super(module_net, self).__init__()
        # three hidden Linear layers, each followed by a Tanh activation, plus a final output layer
        self.layer1 = nn.Linear(num_input, num_hidden)
        self.layer2 = nn.Tanh()
        self.layer3 = nn.Linear(num_hidden, num_hidden)
        self.layer4 = nn.Tanh()
        self.layer5 = nn.Linear(num_hidden, num_hidden)
        self.layer6 = nn.Tanh()
        self.layer7 = nn.Linear(num_hidden, num_output)

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = self.layer6(x)
        x = self.layer7(x)
        return x
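For reference, here is a minimal sketch of how this network might be trained; it reuses module_net and the imports from the code above, and the sizes, random data, loss, learning rate and epoch count are illustrative assumptions, not part of the original post.

    # minimal training sketch (all sizes, data and hyperparameters are hypothetical)
    net = module_net(num_input=10, num_hidden=32, num_output=1)
    criterion = nn.MSELoss()                                 # assuming a regression task
    optimizer = torch.optim.SGD(net.parameters(), lr=1e-2)

    x = torch.randn(64, 10)   # 64 random samples with 10 features each
    y = torch.randn(64, 1)    # matching random targets

    for epoch in range(100):
        optimizer.zero_grad()
        out = net(x)
        loss = criterion(out, y)
        loss.backward()       # backpropagation
        optimizer.step()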
A simpler model is as follows:
class module_net(nn.Module):
    def __init__(self, num_input, num_hidden, num_output):
        super(module_net, self).__init__()
        self.layer1 = nn.Linear(num_input, num_hidden)
        self.layer2 = nn.ReLU()
        self.layer3 = nn.Linear(num_hidden, num_output)
        self.layer4 = nn.ReLU()   # note: this ReLU on the output layer makes all outputs non-negative

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
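A quick shape check of this simpler model (the sizes below are made-up example values, not from the original):

    net = module_net(num_input=4, num_hidden=8, num_output=3)
    x = torch.randn(5, 4)            # a batch of 5 samples with 4 features each
    out = net(x)
    print(out.shape)                 # torch.Size([5, 3])
    print((out >= 0).all().item())   # True: the final ReLU keeps outputs non-negative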
Of course, batch normalization (BatchNorm) layers can also be added:

class module_net(nn.Module):
    def __init__(self, num_input, num_hidden, num_output):
        super(module_net, self).__init__()
        self.layer1 = nn.Linear(num_input, num_hidden)
        # the BatchNorm feature count must match the output size of layer1 (num_hidden, not num_input)
        self.bn1 = nn.BatchNorm1d(num_hidden)
        self.layer2 = nn.Tanh()
        self.layer3 = nn.Linear(num_hidden, num_hidden)
        self.bn2 = nn.BatchNorm1d(num_hidden)
        self.layer4 = nn.Tanh()
        self.layer5 = nn.Linear(num_hidden, num_hidden)
        self.bn3 = nn.BatchNorm1d(num_hidden)
        self.layer6 = nn.Tanh()
        self.layer7 = nn.Linear(num_hidden, num_output)
        self.bn4 = nn.BatchNorm1d(num_output)

    def forward(self, x):
        x = self.layer1(x)
        x = self.bn1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.bn2(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = self.bn3(x)
        x = self.layer6(x)
        x = self.layer7(x)
        x = self.bn4(x)
        return x
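One practical point with BatchNorm: it uses per-batch statistics during training and stored running statistics during evaluation, so the model must be switched between modes explicitly. A minimal sketch using the class above (the sizes are illustrative):

    net = module_net(num_input=10, num_hidden=32, num_output=1)

    net.train()                        # BatchNorm uses per-batch statistics and updates its running estimates
    out = net(torch.randn(16, 10))

    net.eval()                         # BatchNorm switches to its stored running mean/variance
    with torch.no_grad():
        out = net(torch.randn(16, 10))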
Reference links:
【1】[PyTorch Deep Learning] 4. Implementing a multi-layer network with PyTorch
【2】A BP neural network with multiple hidden layers