class MultiLayerPerceptron(nn.Module):
    """Two-layer fully connected network: Linear -> ReLU -> Linear -> Sigmoid.

    Weights and biases are held as raw ``nn.Parameter`` tensors (drawn from a
    standard normal) rather than ``nn.Linear`` modules, so the matrix algebra
    is explicit in ``forward``.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        # Layer 1: input_size -> hidden_size.
        # NOTE: parameters are created in the same order as before
        # (weights1, bias1, weights2, bias2) so seeded initialization matches.
        self.weights1 = nn.Parameter(torch.randn(input_size, hidden_size))
        self.bias1 = nn.Parameter(torch.randn(hidden_size))
        # Layer 2: hidden_size -> output_size.
        self.weights2 = nn.Parameter(torch.randn(hidden_size, output_size))
        self.bias2 = nn.Parameter(torch.randn(output_size))

    def forward(self, x):
        # Hidden activation: affine transform followed by ReLU.
        hidden = torch.relu(x @ self.weights1 + self.bias1)
        # Output in (0, 1): affine transform followed by sigmoid.
        return torch.sigmoid(hidden @ self.weights2 + self.bias2)

# Example usage:
mlp = MultiLayerPerceptron(input_size=3, hidden_size=5, output_size=2)
# Fix: example_input was previously undefined (NameError at runtime).
# A batch of one sample with 3 features, matching input_size above.
example_input = torch.randn(1, 3)
print("MLP Output:", mlp(example_input))

# Total parameter count: (input_size + 1) * hidden_size + (hidden_size + 1) * output_size.