Perceptron

The perceptron is the simplest neural network model: a single linear layer followed by a nonlinearity. The classic perceptron uses a step function; the implementation below substitutes a sigmoid, which is differentiable and therefore trainable with gradient descent.

import torch
import torch.nn as nn
import torch.nn.functional as F

class Perceptron(nn.Module):
    def __init__(self, input_size):
        super(Perceptron, self).__init__()
        self.weights = nn.Parameter(torch.randn(input_size, 1))  # nn.Parameter sets requires_grad=True by default
        self.bias = nn.Parameter(torch.zeros(1))  # must be a float tensor; integer tensors cannot require gradients

    def forward(self, x):
        # x: (batch, input_size) @ (input_size, 1) -> (batch, 1)
        x = torch.mm(x, self.weights) + self.bias
        return torch.sigmoid(x)

# Example usage:
perceptron = Perceptron(input_size=3)
example_input = torch.tensor([[1.0, 2.0, 3.0]])  # Note the extra brackets for batch dimension
print("Perceptron Output:", perceptron(example_input))

PyTorch bundles these two parameters, the weight matrix and the bias, into a single module: nn.Linear. Below is an alternative, more common way to write the same model.

class Perceptron(nn.Module):
    def __init__(self, input_size):
        super(Perceptron, self).__init__()
        self.fc = nn.Linear(input_size, 1)

    def forward(self, x):
        x = self.fc(x)
        return torch.sigmoid(x)

# Example usage:
perceptron = Perceptron(input_size=3)
example_input = torch.tensor([[1.0, 2.0, 3.0]])  # Keep the batch dimension, as before
print("Perceptron Output:", perceptron(example_input))

Single-Layer Perceptron (SLP)

An SLP is several perceptrons sharing the same input: the weight matrix has shape (input_size, output_size), and each output column behaves as an independent perceptron.

class SingleLayerPerceptron(nn.Module):
    def __init__(self, input_size, output_size):
        super(SingleLayerPerceptron, self).__init__()
        self.weights = nn.Parameter(torch.randn(input_size, output_size))
        self.bias = nn.Parameter(torch.randn(output_size))

    def forward(self, x):
        # x: (batch, input_size) @ (input_size, output_size) -> (batch, output_size)
        x = torch.mm(x, self.weights) + self.bias
        return torch.sigmoid(x)

# Example usage:
slp = SingleLayerPerceptron(input_size=3, output_size=2)
print("SLP Output:", slp(example_input))

Practice: implement this SLP using nn.Linear.

Answer
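
A possible solution (one sketch of many; the attribute name fc is arbitrary):

class SingleLayerPerceptron(nn.Module):
    def __init__(self, input_size, output_size):
        super(SingleLayerPerceptron, self).__init__()
        self.fc = nn.Linear(input_size, output_size)  # replaces the manual weights and bias

    def forward(self, x):
        return torch.sigmoid(self.fc(x))

# Example usage:
slp = SingleLayerPerceptron(input_size=3, output_size=2)
print("SLP Output:", slp(example_input))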

Multi-Layer Perceptron (MLP)

class MultiLayerPerceptron(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(MultiLayerPerceptron, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = torch.relu(self.fc1(x))  # Using ReLU activation for hidden layer
        return torch.sigmoid(self.fc2(x))  # Using sigmoid for output activation

# Example usage:
mlp = MultiLayerPerceptron(input_size=3, hidden_size=5, output_size=2)
example_input = torch.tensor([[1.0, 2.0, 3.0]])  # Batch dimension again
print("MLP Output:", mlp(example_input))

Practice: implement this MLP using nn.Parameter, draw a simple node-edge graph of the network's structure, and calculate the total number of parameters.

Answer
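
A possible solution with nn.Parameter (a sketch; the names w1, b1, w2, b2 are illustrative):

class MultiLayerPerceptron(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(MultiLayerPerceptron, self).__init__()
        self.w1 = nn.Parameter(torch.randn(input_size, hidden_size))
        self.b1 = nn.Parameter(torch.zeros(hidden_size))
        self.w2 = nn.Parameter(torch.randn(hidden_size, output_size))
        self.b2 = nn.Parameter(torch.zeros(output_size))

    def forward(self, x):
        x = torch.relu(torch.mm(x, self.w1) + self.b1)        # hidden layer
        return torch.sigmoid(torch.mm(x, self.w2) + self.b2)  # output layer

mlp = MultiLayerPerceptron(input_size=3, hidden_size=5, output_size=2)
print("MLP Output:", mlp(example_input))
print("Total parameters:", sum(p.numel() for p in mlp.parameters()))

The graph has 3 input nodes fully connected to 5 hidden nodes, which are in turn fully connected to 2 output nodes. The parameter count is (3 × 5 + 5) + (5 × 2 + 2) = 20 + 12 = 32, which sum(p.numel() for p in mlp.parameters()) confirms.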