Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 22 additions & 0 deletions data_compression/huffman.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,18 @@ def build_tree(letters: list[Letter]) -> Letter | TreeNode:
"""
Run through the list of Letters and build the min heap
for the Huffman Tree.

>>> letters = [
... Letter('a', 5),
... Letter('b', 9),
... Letter('c', 12),
... Letter('d', 13)
... ]
>>> root = build_tree(letters)
>>> isinstance(root, TreeNode)
True
>>> root.freq
39
"""
response: list[Letter | TreeNode] = list(letters)
while len(response) > 1:
Expand All @@ -55,6 +67,16 @@ def traverse_tree(root: Letter | TreeNode, bitstring: str) -> list[Letter]:
"""
Recursively traverse the Huffman Tree to set each
Letter's bitstring dictionary, and return the list of Letters

>>> letters = [Letter('a', 2), Letter('b', 3), Letter('c', 4)]
>>> root = build_tree(letters)
>>> result = traverse_tree(root, "")
>>> sorted([l.letter for l in result])
['a', 'b', 'c']
>>> all(l.bitstring[l.letter] for l in result)
True
>>> sum(l.freq for l in result)
9
"""
if isinstance(root, Letter):
root.bitstring[root.letter] = bitstring
Expand Down
25 changes: 25 additions & 0 deletions neural_network/back_propagation_neural_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,18 @@


def sigmoid(x: np.ndarray) -> np.ndarray:
    """
    Apply the logistic sigmoid function elementwise.

    Maps every real value into the open interval (0, 1), with
    sigmoid(0) == 0.5.

    >>> import numpy as np
    >>> np.allclose(sigmoid(np.array([0])), np.array([0.5]))
    True
    >>> np.allclose(
    ...     sigmoid(np.array([-1, 0, 1])),
    ...     np.array([0.26894142, 0.5, 0.73105858])
    ... )
    True
    """
    # 1 / (1 + e^(-x)), computed via an intermediate for clarity
    exp_neg_x = np.exp(-x)
    return 1.0 / (1.0 + exp_neg_x)


Expand Down Expand Up @@ -158,6 +170,19 @@ def train(self, xdata, ydata, train_round, accuracy):
return None

def cal_loss(self, ydata, ydata_):
"""
Calculate Mean Squared Error (MSE) loss and its gradient.

>>> import numpy as np
>>> bp = BPNN()
>>> y_true = np.asmatrix([[1.0], [0.5]])
>>> y_pred = np.asmatrix([[0.8], [0.3]])
>>> loss, grad = bp.cal_loss(y_true, y_pred)
>>> float(round(loss, 2))
0.08
>>> np.allclose(grad, np.array([[-0.4], [-0.4]]))
True
"""
self.loss = np.sum(np.power((ydata - ydata_), 2))
self.loss_gradient = 2 * (ydata_ - ydata)
# vector (shape is the same as _ydata.shape)
Expand Down