Skip to content

Commit

Permalink
Backend implementation for CPU, NumPy, and GPU
Browse files Browse the repository at this point in the history
  • Loading branch information
m0saan committed Jul 4, 2023
1 parent c2f21aa commit d5de119
Show file tree
Hide file tree
Showing 22 changed files with 989 additions and 619 deletions.
14 changes: 14 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# Build configuration for the native (C++) CPU backend of minima.
cmake_minimum_required(VERSION 3.25)
project(minima)

# The backend sources target C++14.
set(CMAKE_CXX_STANDARD 14)

# Public headers, plus the CPU-backend-specific headers.
include_directories(include)
include_directories(include/cpu_backend)

# Headers are listed alongside sources so IDE generators index them.
# NOTE(review): the header is named aligned_buffer.h while the source is
# aligned_array.cc -- confirm this naming mismatch is intentional.
add_executable(minima
        include/cpu_backend/aligned_buffer.h
        include/cpu_backend/operations.h
        src/cpu_backend/aligned_array.cc
        src/cpu_backend/operations.cc
        src/main.cc)
Binary file added datasets/fashion/t10k-images-idx3-ubyte.gz
Binary file not shown.
Binary file added datasets/fashion/t10k-labels-idx1-ubyte.gz
Binary file not shown.
Binary file added datasets/fashion/train-images-idx3-ubyte.gz
Binary file not shown.
Binary file added datasets/fashion/train-labels-idx1-ubyte.gz
Binary file not shown.
3 changes: 3 additions & 0 deletions datasets/mnist/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Downloading the MNIST dataset

Download the four MNIST archive files (train/test images and labels) from:

http://yann.lecun.com/exdb/mnist/
6 changes: 3 additions & 3 deletions minima/__init__.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
__version__ = "0.0.1"
from . import autograd
from .autograd import Tensor, cpu, all_devices

from . import operators
from .operators import *
from .autograd import Tensor
from .minima_backend import *

from .ndarray import *
from . import nn
from . import init
from . import optim
Expand Down
53 changes: 36 additions & 17 deletions minima/_modidx.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,18 +5,7 @@
'doc_host': 'https://m0saan.github.io',
'git_url': 'https://github.com/m0saan/minima',
'lib_path': 'minima'},
'syms': { 'minima.autograd': { 'minima.autograd.CPUDevice': ('autograd.html#cpudevice', 'minima/autograd.py'),
'minima.autograd.CPUDevice.__eq__': ('autograd.html#cpudevice.__eq__', 'minima/autograd.py'),
'minima.autograd.CPUDevice.__hash__': ('autograd.html#cpudevice.__hash__', 'minima/autograd.py'),
'minima.autograd.CPUDevice.__repr__': ('autograd.html#cpudevice.__repr__', 'minima/autograd.py'),
'minima.autograd.CPUDevice.enabled': ('autograd.html#cpudevice.enabled', 'minima/autograd.py'),
'minima.autograd.CPUDevice.one_hot': ('autograd.html#cpudevice.one_hot', 'minima/autograd.py'),
'minima.autograd.CPUDevice.ones': ('autograd.html#cpudevice.ones', 'minima/autograd.py'),
'minima.autograd.CPUDevice.rand': ('autograd.html#cpudevice.rand', 'minima/autograd.py'),
'minima.autograd.CPUDevice.randn': ('autograd.html#cpudevice.randn', 'minima/autograd.py'),
'minima.autograd.CPUDevice.zeros': ('autograd.html#cpudevice.zeros', 'minima/autograd.py'),
'minima.autograd.Device': ('autograd.html#device', 'minima/autograd.py'),
'minima.autograd.Operator': ('autograd.html#operator', 'minima/autograd.py'),
'syms': { 'minima.autograd': { 'minima.autograd.Operator': ('autograd.html#operator', 'minima/autograd.py'),
'minima.autograd.Operator.__call__': ('autograd.html#operator.__call__', 'minima/autograd.py'),
'minima.autograd.Operator.compute': ('autograd.html#operator.compute', 'minima/autograd.py'),
'minima.autograd.Operator.gradient': ('autograd.html#operator.gradient', 'minima/autograd.py'),
Expand All @@ -39,6 +28,7 @@
'minima.autograd.Tensor._array_from_numpy': ( 'autograd.html#tensor._array_from_numpy',
'minima/autograd.py'),
'minima.autograd.Tensor._init': ('autograd.html#tensor._init', 'minima/autograd.py'),
'minima.autograd.Tensor.argmax': ('autograd.html#tensor.argmax', 'minima/autograd.py'),
'minima.autograd.Tensor.backward': ('autograd.html#tensor.backward', 'minima/autograd.py'),
'minima.autograd.Tensor.broadcast_to': ('autograd.html#tensor.broadcast_to', 'minima/autograd.py'),
'minima.autograd.Tensor.create_detached_tensor': ( 'autograd.html#tensor.create_detached_tensor',
Expand All @@ -48,6 +38,7 @@
'minima.autograd.Tensor.device': ('autograd.html#tensor.device', 'minima/autograd.py'),
'minima.autograd.Tensor.dtype': ('autograd.html#tensor.dtype', 'minima/autograd.py'),
'minima.autograd.Tensor.exp': ('autograd.html#tensor.exp', 'minima/autograd.py'),
'minima.autograd.Tensor.item': ('autograd.html#tensor.item', 'minima/autograd.py'),
'minima.autograd.Tensor.make_from_op': ('autograd.html#tensor.make_from_op', 'minima/autograd.py'),
'minima.autograd.Tensor.matmul': ('autograd.html#tensor.matmul', 'minima/autograd.py'),
'minima.autograd.Tensor.numpy': ('autograd.html#tensor.numpy', 'minima/autograd.py'),
Expand Down Expand Up @@ -79,9 +70,7 @@
'minima.autograd.Value.is_leaf': ('autograd.html#value.is_leaf', 'minima/autograd.py'),
'minima.autograd.Value.item': ('autograd.html#value.item', 'minima/autograd.py'),
'minima.autograd.Value.relu': ('autograd.html#value.relu', 'minima/autograd.py'),
'minima.autograd.Value.tanh': ('autograd.html#value.tanh', 'minima/autograd.py'),
'minima.autograd.all_devices': ('autograd.html#all_devices', 'minima/autograd.py'),
'minima.autograd.cpu': ('autograd.html#cpu', 'minima/autograd.py')},
'minima.autograd.Value.tanh': ('autograd.html#value.tanh', 'minima/autograd.py')},
'minima.data': { 'minima.data.BatchSampler': ('data.html#batchsampler', 'minima/data.py'),
'minima.data.BatchSampler.__init__': ('data.html#batchsampler.__init__', 'minima/data.py'),
'minima.data.BatchSampler.__iter__': ('data.html#batchsampler.__iter__', 'minima/data.py'),
Expand All @@ -95,8 +84,7 @@
'minima.data.Dataset.apply_transforms': ('data.html#dataset.apply_transforms', 'minima/data.py'),
'minima.data.Sampler': ('data.html#sampler', 'minima/data.py'),
'minima.data.Sampler.__init__': ('data.html#sampler.__init__', 'minima/data.py'),
'minima.data.Sampler.__iter__': ('data.html#sampler.__iter__', 'minima/data.py'),
'minima.data.collate': ('data.html#collate', 'minima/data.py')},
'minima.data.Sampler.__iter__': ('data.html#sampler.__iter__', 'minima/data.py')},
'minima.init': { 'minima.init.constant': ('init.html#constant', 'minima/init.py'),
'minima.init.kaiming_normal': ('init.html#kaiming_normal', 'minima/init.py'),
'minima.init.kaiming_uniform': ('init.html#kaiming_uniform', 'minima/init.py'),
Expand All @@ -108,6 +96,8 @@
'minima.init.xavier_normal': ('init.html#xavier_normal', 'minima/init.py'),
'minima.init.xavier_uniform': ('init.html#xavier_uniform', 'minima/init.py'),
'minima.init.zeros': ('init.html#zeros', 'minima/init.py')},
'minima.learner': {},
'minima.minima_backend': {},
'minima.ndarray': { 'minima.ndarray.BackendDevice': ('ndarray.html#backenddevice', 'minima/ndarray.py'),
'minima.ndarray.BackendDevice.__eq__': ('ndarray.html#backenddevice.__eq__', 'minima/ndarray.py'),
'minima.ndarray.BackendDevice.__getattr__': ('ndarray.html#backenddevice.__getattr__', 'minima/ndarray.py'),
Expand Down Expand Up @@ -168,6 +158,7 @@
'minima.ndarray.NDArray.sum': ('ndarray.html#ndarray.sum', 'minima/ndarray.py'),
'minima.ndarray.NDArray.tanh': ('ndarray.html#ndarray.tanh', 'minima/ndarray.py'),
'minima.ndarray.NDArray.to': ('ndarray.html#ndarray.to', 'minima/ndarray.py'),
'minima.ndarray.all_devices': ('ndarray.html#all_devices', 'minima/ndarray.py'),
'minima.ndarray.cpu_numpy': ('ndarray.html#cpu_numpy', 'minima/ndarray.py'),
'minima.ndarray.default_device': ('ndarray.html#default_device', 'minima/ndarray.py')},
'minima.ndarray_backend_numpy': { 'minima.ndarray_backend_numpy.Array': ( 'ndarray_backend_numpy.html#array',
Expand All @@ -178,8 +169,36 @@
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.Array.size': ( 'ndarray_backend_numpy.html#array.size',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.CPUDevice': ( 'ndarray_backend_numpy.html#cpudevice',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.CPUDevice.__eq__': ( 'ndarray_backend_numpy.html#cpudevice.__eq__',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.CPUDevice.__hash__': ( 'ndarray_backend_numpy.html#cpudevice.__hash__',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.CPUDevice.__repr__': ( 'ndarray_backend_numpy.html#cpudevice.__repr__',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.CPUDevice.empty': ( 'ndarray_backend_numpy.html#cpudevice.empty',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.CPUDevice.enabled': ( 'ndarray_backend_numpy.html#cpudevice.enabled',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.CPUDevice.full': ( 'ndarray_backend_numpy.html#cpudevice.full',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.CPUDevice.one_hot': ( 'ndarray_backend_numpy.html#cpudevice.one_hot',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.CPUDevice.rand': ( 'ndarray_backend_numpy.html#cpudevice.rand',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.CPUDevice.randn': ( 'ndarray_backend_numpy.html#cpudevice.randn',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.Device': ( 'ndarray_backend_numpy.html#device',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.all_devices': ( 'ndarray_backend_numpy.html#all_devices',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.compact': ( 'ndarray_backend_numpy.html#compact',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.cpu': ( 'ndarray_backend_numpy.html#cpu',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.default_device': ( 'ndarray_backend_numpy.html#default_device',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.ewise_add': ( 'ndarray_backend_numpy.html#ewise_add',
'minima/ndarray_backend_numpy.py'),
'minima.ndarray_backend_numpy.ewise_div': ( 'ndarray_backend_numpy.html#ewise_div',
Expand Down
77 changes: 19 additions & 58 deletions minima/autograd.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
# AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/00_autograd.ipynb.

# %% auto 0
__all__ = ['NDArray', 'LAZY_MODE', 'TENSOR_COUNTER', 'Value', 'Device', 'CPUDevice', 'cpu', 'all_devices', 'Operator', 'TensorOp',
'Tensor']
__all__ = ['NDArray', 'LAZY_MODE', 'TENSOR_COUNTER', 'Value', 'Operator', 'TensorOp', 'Tensor']

# %% ../nbs/00_autograd.ipynb 3
from typing import (
Expand All @@ -16,10 +15,11 @@
import numpy
import numpy as ARRAY_API
import minima as mi
from .minima_backend import Device, ARRAY_API, NDArray, default_device
numpy.set_printoptions(precision=6, linewidth=160)
# from graphviz import Digraph

# %% ../nbs/00_autograd.ipynb 70
# %% ../nbs/00_autograd.ipynb 67
class Value:
"""
A class representing a scalar value and its gradient in a computational graph.
Expand Down Expand Up @@ -329,55 +329,12 @@ def build_topo(node):



# %% ../nbs/00_autograd.ipynb 71
# %% ../nbs/00_autograd.ipynb 68
NDArray = numpy.ndarray
LAZY_MODE = False
TENSOR_COUNTER = 0

# %% ../nbs/00_autograd.ipynb 72
class Device:
    """Marker base class for devices that can back an NDArray."""


class CPUDevice(Device):
    """Device whose data lives in host (CPU) memory, backed by NumPy."""

    def __repr__(self):
        return "minima.cpu()"

    def __hash__(self):
        # All CPU devices are interchangeable, so hash on the canonical repr.
        return hash(repr(self))

    def __eq__(self, other):
        # Any two CPU devices compare equal.
        return isinstance(other, CPUDevice)

    def enabled(self):
        # The CPU backend is always available.
        return True

    def zeros(self, *shape, dtype="float32"):
        """Array of the given shape filled with zeros."""
        return numpy.full(shape, 0, dtype=dtype)

    def ones(self, *shape, dtype="float32"):
        """Array of the given shape filled with ones."""
        return numpy.full(shape, 1, dtype=dtype)

    def randn(self, *shape):
        """Samples drawn from the standard normal distribution."""
        return numpy.random.randn(*shape)

    def rand(self, *shape):
        """Samples drawn uniformly from [0, 1)."""
        return numpy.random.rand(*shape)

    def one_hot(self, n, i, dtype="float32"):
        """One-hot encoding: row(s) ``i`` of the ``n`` x ``n`` identity matrix."""
        identity = numpy.eye(n, dtype=dtype)
        return identity[i]

def cpu():
    """Return the CPU device."""
    device = CPUDevice()
    return device


def all_devices():
    """Return a list of every available device (currently just the CPU)."""
    return [cpu()]

# %% ../nbs/00_autograd.ipynb 73
# %% ../nbs/00_autograd.ipynb 69
class Operator:

def __call__(self, *args):
Expand All @@ -389,14 +346,14 @@ def compute(self, *args: Tuple[NDArray]):
def gradient(self, out_grad: 'Value', node: 'Value') -> Union['Value', Tuple['Value']]:
raise NotImplementedError()

# %% ../nbs/00_autograd.ipynb 74
# %% ../nbs/00_autograd.ipynb 70
class TensorOp(Operator):
""" Op class specialized to output tensors, will be alternate subclasses for other structures """

def __call__(self, *args):
return Tensor.make_from_op(self, args)

# %% ../nbs/00_autograd.ipynb 75
# %% ../nbs/00_autograd.ipynb 71
class Value:
"""
Represents a node within a computational graph.
Expand Down Expand Up @@ -432,7 +389,7 @@ def compute_cached_data(self):
def is_leaf(self):
return self.op is None

# %% ../nbs/00_autograd.ipynb 76
# %% ../nbs/00_autograd.ipynb 72
class Tensor(Value):
"""
A Tensor represents a multidimensional array of values in a computational graph.
Expand Down Expand Up @@ -478,7 +435,7 @@ def __init__(
Returns:
None.
"""

if isinstance(array, Tensor):
if device is None:
device = array.device
Expand All @@ -488,20 +445,18 @@ def __init__(
data = array.compute_cached_data()
else:
# fall back, copy through numpy conversion
data = Tensor._array_from_numpy(
array.numpy(), device=device, dtype=dtype
)
data = Tensor._array_from_numpy(array.numpy(), device=device, dtype=dtype)
else:
device = device if device else cpu()
device = device if device else default_device()
data = Tensor._array_from_numpy(array, device=device, dtype=dtype)

self._init(None, (), data=data, requires_grad=requires_grad, )

def __repr__(self):
return "minima.Tensor(\n" + str(self.compute_cached_data()) + ")"
return "minima.Tensor(" + str(self.compute_cached_data()) + ")"

def __str__(self):
return "tensor(" + self.compute_cached_data().__str__() + ")"
return "minima.Tensor(" + self.compute_cached_data().__str__() + ")"

def __len__(self) -> int:
return len(self.cached_data)
Expand Down Expand Up @@ -684,6 +639,9 @@ def data(self, value):
)
self.cached_data = value.compute_cached_data()

def item(self):
    """Return the value of a single-element tensor as a plain Python scalar."""
    cached = self.compute_cached_data()
    return cached.item()


@property
def shape(self):
Expand Down Expand Up @@ -937,6 +895,9 @@ def transpose(self, axes=None):
def exp(self) -> 'Tensor':
return mi.operators.Exp()(self)

def argmax(self, axis=None, keepdims=None):
    """Return a new Tensor of indices of the maximum values.

    Parameters
    ----------
    axis : int, optional
        Axis along which to search; ``None`` (default) searches the
        flattened array.
    keepdims : bool, optional
        When True, the reduced axis is retained with size one.  Forwarded
        to the backend only when explicitly given: NumPy's ``argmax``
        rejects a non-bool ``keepdims`` (and pre-1.22 NumPy has no
        ``keepdims`` parameter at all), so unconditionally passing the
        ``None`` default -- as the previous implementation did -- raises
        ``TypeError`` for a plain ``t.argmax()`` call.
    """
    data = self.compute_cached_data()
    if keepdims is None:
        return Tensor(ARRAY_API.argmax(data, axis=axis))
    return Tensor(ARRAY_API.argmax(data, axis=axis, keepdims=keepdims))

__radd__ = __add__
__rmul__ = __mul__
__rsub__ = __sub__
Expand Down
15 changes: 4 additions & 11 deletions minima/data.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/05_data.ipynb.

# %% auto 0
__all__ = ['Sampler', 'BatchSampler', 'Dataset', 'collate', 'DataLoader']
__all__ = ['Sampler', 'BatchSampler', 'Dataset', 'DataLoader']

# %% ../nbs/05_data.ipynb 2
from typing import (
Expand Down Expand Up @@ -116,10 +116,6 @@ def apply_transforms(self, x):
return x

# %% ../nbs/05_data.ipynb 6
def collate(b):
    """Combine a batch of (input, target) pairs into two stacked tensors."""
    inputs, targets = zip(*b)
    return torch.stack(inputs), torch.stack(targets)

class DataLoader:
"""
A custom data loader class.
Expand Down Expand Up @@ -148,7 +144,7 @@ def __init__(self,
self.sampler = sampler if sampler else Sampler(dataset, shuffle)
self.batch_sampler = batch_sampler if batch_sampler else BatchSampler(self.sampler, batch_size, drop_last)
self.num_workers = num_workers # --> TODO: implement a multiprocessing DataLoader :3
self.collate_fn = collate
self.collate_fn = collate_fn
self.drop_last = drop_last

def __iter__(self):
Expand All @@ -162,10 +158,7 @@ def __iter__(self):
>>> for batch in dataloader:
>>> # Process the batch
"""
if self.num_workers:
with mp.Pool(self.num_workers) as ex:
yield from ex.map(self.dataset.__getitem__, iter(self.batch_sampler))
if self.collate_fn is not None:
yield from (self.collate_fn(self.dataset[batch_idxs]) for batch_idxs in self.batch_sampler)
else:
yield from (self.dataset[batch_idxs] for batch_idxs in self.batch_sampler)


Loading

0 comments on commit d5de119

Please sign in to comment.