Implement the neural network below. A hard-coded approach, in which every weight and bias is stored as its own nn.Parameter, is shown first:
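For reference, the function this network computes, reconstructed from the parameter values used in the code below, is

$$\hat{y} = m_1\,\mathrm{ReLU}(w_1 x + b_1) + m_2\,\mathrm{ReLU}(w_2 x + b_2) + m_3\,\mathrm{ReLU}(w_3 x + b_3) + g$$

with $w_1 = w_2 = w_3 = 1$, $(b_1, b_2, b_3) = (2.5, 1.5, 0.5)$, $(m_1, m_2, m_3) = (1, -2, 1)$, and $g = -1$.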
import torch
import torch.nn as nn
import torch.nn.functional as F

class CustomNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        # Weights for the input-to-hidden layer
        self.w1 = nn.Parameter(torch.tensor([1.0]))
        self.w2 = nn.Parameter(torch.tensor([1.0]))
        self.w3 = nn.Parameter(torch.tensor([1.0]))
        # Biases for the hidden layer
        self.b1 = nn.Parameter(torch.tensor([2.5]))
        self.b2 = nn.Parameter(torch.tensor([1.5]))
        self.b3 = nn.Parameter(torch.tensor([0.5]))
        # Weights for the hidden-to-output layer
        self.m1 = nn.Parameter(torch.tensor([1.0]))
        self.m2 = nn.Parameter(torch.tensor([-2.0]))
        self.m3 = nn.Parameter(torch.tensor([1.0]))
        # Bias for the output layer
        self.g = nn.Parameter(torch.tensor([-1.0]))

    def forward(self, x):
        # Hidden layer: three scalar units, each with a ReLU activation
        h1 = F.relu(self.w1 * x + self.b1)
        h2 = F.relu(self.w2 * x + self.b2)
        h3 = F.relu(self.w3 * x + self.b3)
        # Output layer: weighted sum of the hidden activations plus bias
        y_hat = self.m1 * h1 + self.m2 * h2 + self.m3 * h3 + self.g
        return y_hat

# Example usage
model = CustomNetwork()
x_input = torch.tensor([1.0])  # Example input
output = model(x_input)
print("Output of the neural network:", output.item())
The nn.Linear approach packs the same parameters into two linear layers:
import torch
import torch.nn as nn
import torch.nn.functional as F

class CustomNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        # Initialize layers
        # One input to three hidden units
        self.hidden = nn.Linear(1, 3, bias=True)
        # Three hidden units to one output
        self.output_layer = nn.Linear(3, 1, bias=True)
        # Manually set the weights and biases. nn.Linear stores its weight
        # with shape (out_features, in_features), so the hidden weight is
        # 3x1 and the output weight is 1x3. Each layer computes
        # y = x @ W.T + b on row-vector inputs, which matches the usual
        # left-multiplication form W2 @ ReLU(W1 @ x + b1) + b2,
        # i.e. shapes (1, 3) @ (3, 1) @ (1, 1) -> (1, 1).
        with torch.no_grad():
            self.hidden.weight.copy_(torch.tensor([[1.0], [1.0], [1.0]]))     # 2D, 3x1
            self.hidden.bias.copy_(torch.tensor([2.5, 1.5, 0.5]))             # 1D
            self.output_layer.weight.copy_(torch.tensor([[1.0, -2.0, 1.0]]))  # 2D, 1x3
            self.output_layer.bias.copy_(torch.tensor([-1.0]))                # 1D

    def forward(self, x):
        # Hidden layer with ReLU activation
        h = F.relu(self.hidden(x))
        # Output layer maps the three hidden activations to one output
        y_hat = self.output_layer(h)
        return y_hat

# Example usage
model = CustomNetwork()
x_input = torch.tensor([[1.0]])  # Batch dimension included
output = model(x_input)
print("Output of the neural network:", output.item())