Skip to content

Commit

Permalink
Merge pull request #2 from javidahmed64592/refactoring-code
Browse files Browse the repository at this point in the history
Refactor code and add README, license
  • Loading branch information
javidahmed64592 authored Apr 11, 2024
2 parents 367c6c6 + b229291 commit 80aafe1
Show file tree
Hide file tree
Showing 13 changed files with 296 additions and 252 deletions.
21 changes: 21 additions & 0 deletions LICENSE
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 Javid Ahmed

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
3 changes: 3 additions & 0 deletions Pipfile
Original file line number Diff line number Diff line change
Expand Up @@ -15,3 +15,6 @@ mypy = "*"

[requires]
python_version = "3.11"

[scripts]
test = "python -m pytest -vx"
73 changes: 73 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
[![python](https://img.shields.io/badge/Python-3.11-3776AB.svg?style=flat&logo=python&logoColor=ffd343)](https://docs.python.org/3.11/)
<!-- omit from toc -->
# Neural Network
This is a neural network library in Python which can be used to feedforward arrays of inputs, generate outputs, and be trained with expected outputs.

<!-- omit from toc -->
## Table of Contents
- [Installing Dependencies](#installing-dependencies)
- [Using the Neural Network](#using-the-neural-network)
- [Testing](#testing)
- [Formatting, Type Checking and Linting](#formatting-type-checking-and-linting)

## Installing Dependencies
Install the required dependencies using [pipenv](https://github.com/pypa/pipenv):

pipenv install
pipenv install --dev

## Using the Neural Network
The neural network can be created in the following way:

```
from src.nn.neural_network import NeuralNetwork
nn = NeuralNetwork(input_nodes=num_inputs, hidden_nodes=num_hidden, output_nodes=num_outputs)
```

where

- `num_inputs`: Number of inputs to pass through neural network
- `num_hidden`: Number of nodes in the hidden layer
- `num_outputs`: Number of outputs to be generated

To feedforward an array of inputs:

```
outputs = nn.feedforward([x_1, ..., x_n]) # n: Number of inputs
```

The neural network can also be trained by providing an array of inputs and expected outputs, and backpropagating the error using gradient descent.

```
inputs = [x_1, ..., x_n]
expected_outputs = [y_1, ..., y_m] # m: Number of outputs
errors = nn.train(inputs, expected_outputs)
```

## Testing
This library uses Pytest for the unit tests.
These tests are located in the `tests` directory.
To run the tests:

pipenv run test

## Formatting, Type Checking and Linting
This library uses a number of tools for code formatting, type checking and linting. These tools are configured in `pyproject.toml`, `setup.cfg` and `mypy.ini`.

Black is used as a code formatter:

black .

isort is used for tidying up imports:

isort .

Mypy is used as a type checker:

mypy .

Flake8 is used for linting:

flake8
51 changes: 0 additions & 51 deletions main.py

This file was deleted.

12 changes: 11 additions & 1 deletion src/math/matrix.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from __future__ import annotations

from typing import Callable, List, Optional
from typing import Callable, List, Optional, cast

import numpy as np
from numpy.typing import NDArray
Expand Down Expand Up @@ -181,3 +181,13 @@ def map(matrix: Matrix, func: Callable) -> Matrix:
"""
new_matrix = np.vectorize(func)(matrix.data)
return Matrix.from_array(new_matrix)

def to_array(self) -> List[float]:
    """Convert this Matrix into a flat list of Python floats.

    Returns:
        List[float]: row 0 of the underlying data, as native floats.
    """
    # .tolist() converts the 2-D backing array into nested Python lists;
    # the matrix of interest holds a single row, so take row 0.
    # NOTE(review): assumes self.data is a 2-D ndarray — confirm against Matrix.
    row = self.data.tolist()[0]
    return cast(List[float], row)
6 changes: 3 additions & 3 deletions src/nn/neural_network.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from typing import List, cast
from typing import List

import numpy as np
from numpy.typing import NDArray
Expand Down Expand Up @@ -52,7 +52,7 @@ def feedforward(self, inputs: NDArray | List[float]) -> List[float]:
hidden = self._hidden_layer.feedforward(input_matrix)
output = self._output_layer.feedforward(hidden)
output = Matrix.transpose(output)
return cast(List[float], output.data[0])
return output.to_array()

def train(self, inputs: List[float], expected_outputs: List[float]) -> List[float]:
"""
Expand Down Expand Up @@ -84,4 +84,4 @@ def train(self, inputs: List[float], expected_outputs: List[float]) -> List[floa
self._hidden_layer.backpropagate_error(layer_vals=hidden, input_vals=input_matrix, errors=hidden_errors)

output_errors = Matrix.transpose(output_errors)
return cast(List[float], output_errors.data[0])
return output_errors.to_array()
32 changes: 8 additions & 24 deletions src/nn/node.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,22 +47,6 @@ def random_node(cls, size: int, weights_range: List[float], bias_range: List[flo
node = cls(_weights, _bias, activation)
return node

@classmethod
def with_params(cls, weights: NDArray, bias: float, activation: Callable) -> Node:
    """Create a Node with explicitly assigned weights and bias.

    Parameters:
        weights (NDArray): Node weights
        bias (float): Node bias
        activation (Callable): Node activation function

    Returns:
        node (Node): Node built from the given parameters
    """
    # Thin alternate constructor: forward everything straight to __init__.
    return cls(weights, bias, activation)

def _calculate_output(self, inputs: List[float]) -> float:
"""
Calculate node output from array of inputs.
Expand Down Expand Up @@ -101,8 +85,8 @@ def _calculate_delta_w(self, inputs: List[float], error: float) -> NDArray:
Returns:
delta_w (NDArray): Array to add to weights
"""
delta_factor = error * self.LR
delta_w = np.array(inputs) * delta_factor
_delta_factor = error * self.LR
delta_w = np.array(inputs) * _delta_factor
return delta_w

def _calculate_delta_b(self, error: float) -> float:
Expand Down Expand Up @@ -139,8 +123,8 @@ def feedforward(self, inputs: List[float]) -> float:
Returns:
output (float): Node output
"""
sum = self._calculate_output(inputs=inputs)
output = self._activation(sum)
_sum = self._calculate_output(inputs=inputs)
output = self._activation(_sum)
return cast(float, output)

def train(self, inputs: List[float], target: float) -> None:
Expand All @@ -151,7 +135,7 @@ def train(self, inputs: List[float], target: float) -> None:
inputs (List[float]): Inputs to pass through feedforward
target (float): Expected output from inputs
"""
guess = self.feedforward(inputs)
if guess != target:
error = self._calculate_error(guess, target)
self._backpropagate(inputs, error)
_output = self.feedforward(inputs)
if _output != target:
_error = self._calculate_error(_output, target)
self._backpropagate(inputs, _error)
66 changes: 66 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
import pytest

from src.math.matrix import Matrix
from src.nn.layer import Layer
from src.nn.neural_network import NeuralNetwork
from src.nn.node import Node


@pytest.fixture
def mock_activation():
    """Deterministic linear activation (triples its input) for tests."""
    def _triple(x):
        return x * 3

    return _triple


@pytest.fixture
def mock_weights_range():
    """Inclusive [low, high] range used when sampling random node weights."""
    low, high = -1.0, 1.0
    return [low, high]


@pytest.fixture
def mock_bias_range():
    """Inclusive [low, high] range used when sampling a random node bias."""
    low, high = -1.0, 1.0
    return [low, high]


@pytest.fixture
def mock_inputs():
    """Sample input vector fed through the network in tests."""
    sample_inputs = [1.0, 0.5, 0.7]
    return sample_inputs


@pytest.fixture
def mock_len_inputs(mock_inputs):
    """Number of elements in the sample input vector."""
    count = len(mock_inputs)
    return count


@pytest.fixture
def mock_len_hidden():
    """Number of nodes in the hidden layer used by the test network."""
    hidden_size = 5
    return hidden_size


@pytest.fixture
def mock_outputs():
    """Expected output vector used when training in tests."""
    expected = [0.0, 1.0]
    return expected


@pytest.fixture
def mock_len_outputs(mock_outputs):
    """Number of elements in the expected output vector."""
    count = len(mock_outputs)
    return count


@pytest.fixture
def mock_nn(mock_len_inputs, mock_len_hidden, mock_len_outputs):
    """NeuralNetwork sized entirely from the shared fixtures.

    Uses mock_len_hidden instead of a hard-coded hidden-layer size so the
    network stays consistent with mock_layer, and so changing the hidden
    size in one fixture updates every test.
    """
    return NeuralNetwork(mock_len_inputs, mock_len_hidden, mock_len_outputs)


@pytest.fixture
def mock_layer(mock_len_hidden, mock_len_inputs, mock_activation):
    """Hidden Layer wired to the shared sizes and activation."""
    layer = Layer(mock_len_hidden, mock_len_inputs, mock_activation)
    return layer


@pytest.fixture
def mock_node(mock_len_inputs, mock_weights_range, mock_bias_range, mock_activation):
    """Randomly initialised Node drawn from the shared ranges."""
    node = Node.random_node(
        mock_len_inputs, mock_weights_range, mock_bias_range, mock_activation
    )
    return node


@pytest.fixture
def mock_input_matrix(mock_inputs):
    """Sample inputs wrapped in a Matrix, as the layers consume them."""
    matrix = Matrix.from_array(mock_inputs)
    return matrix
Loading

0 comments on commit 80aafe1

Please sign in to comment.