Feature device discover (#41)
* Corrected Prophesee Cmake and relaxed serial number requirement
* Improved vendor-specific instructions
* Added SDL example layer and polished examples
* Added device inference
* Added CUDA code for tensor buffer
* Separated CPU from CUDA build
Jegp committed Sep 26, 2022
1 parent c013752 commit 5f7c075
Showing 30 changed files with 412 additions and 387 deletions.
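The headline change is device discovery: `DVSInput` no longer takes an explicit USB bus/device address, but finds a connected camera on its own. A minimal sketch of the API change, using only the constructor signatures visible in the diffs below:

```python
from aestream import DVSInput

# Before this commit, the USB bus and device address had to be passed explicitly:
#   with DVSInput(2, 4, (640, 480)) as stream: ...

# After this commit, the first available camera is discovered automatically:
with DVSInput((640, 480), device="cpu") as stream:
    frame = stream.read()  # (640, 480) tensor of accumulated events
```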
2 changes: 1 addition & 1 deletion .github/deps/manylinux_dependencies.sh
@@ -12,7 +12,7 @@ echo `pwd`
cd /root

# Install libtorch
curl -L -s -m 100 https://download.pytorch.org/libtorch/nightly/cpu/libtorch-shared-with-deps-latest.zip > libtorch.zip
curl -L -s -m 100 https://download.pytorch.org/libtorch/cpu/libtorch-shared-with-deps-1.12.1%2Bcpu.zip > libtorch.zip
# We use Python to unzip because
# - Modern Linux systems use PIDs above 65k
# - Old versions of unzip do not cope well with high PIDs
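The comment above explains why the script avoids the system `unzip`; the extraction call itself sits below this hunk, but it is presumably a small Python call along these lines (a sketch, not the script's verbatim code):

```python
import zipfile

# Python's zipfile is immune to the high-PID problem that breaks old unzip builds
zipfile.ZipFile("libtorch.zip").extractall()
```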
2 changes: 1 addition & 1 deletion .github/workflows/publish.yml
@@ -1,6 +1,6 @@
name: Publish to PyPi

on: [push, pull_request]
on: [pull_request, release]

# Thanks to https://github.com/pypa/cibuildwheel/blob/main/examples/github-deploy.yml
jobs:
3 changes: 3 additions & 0 deletions .gitignore
@@ -29,3 +29,6 @@ result
*.dat
*.mp4
*generated.h
*.deb
release/
wheelhouse/
7 changes: 4 additions & 3 deletions README.md
@@ -28,8 +28,8 @@ First, install [PyTorch](https://pytorch.org/) and [libcaer](https://gitlab.com/
Then install `aestream` via pip: `pip install aestream`

```python
# Stream events from a DVS camera over USB at address 2:4
with DVSInput(2, 4, (640, 480)) as stream:
# Stream events from a DVS camera over USB
with DVSInput((640, 480)) as stream:
while True:
frame = stream.read() # Provides a (640, 480) tensor
...
@@ -44,6 +44,7 @@ with UDPInput((640, 480), port=3333) as stream:
```

More examples can be found in [our example folder](https://github.com/norse/aestream/tree/master/example).
Please note that the examples may require additional dependencies (such as [Norse](https://github.com/norse/norse) for spiking networks or [PySDL](https://github.com/py-sdl/py-sdl2) for rendering). To install all the requirements, run `pip install -r example/requirements.txt` from the `aestream` root directory.

## Usage (CLI)

@@ -136,7 +137,7 @@ Please cite `aestream` if you use it in your work:
month = {August},
year = 2022,
publisher = {Zenodo},
version = {0.2.0},
version = {0.3.0},
doi = {10.5281/zenodo.6322829},
url = {https://doi.org/10.5281/zenodo.6322829}
}
Empty file added example/__init__.py
Empty file.
4 changes: 4 additions & 0 deletions example/requirements.txt
@@ -0,0 +1,4 @@
matplotlib
norse
pysdl2
pysdl2-dll
18 changes: 18 additions & 0 deletions example/sdl.py
@@ -0,0 +1,18 @@
import sdl2.ext

WHITE = (255 << 16 | 255 << 8 | 255)  # Full-intensity RGB (0xFFFFFF)

def create_sdl_surface(*shape):
sdl2.ext.init()
window = sdl2.ext.Window("AEStream window", shape)
window.show()
# window = sdl2.SDL_CreateWindow("AEStream window", 100, 100, *shape, 0)

factory = sdl2.ext.SpriteFactory(sdl2.ext.SOFTWARE)
renderer = factory.create_sprite_render_system(window)
pixels = sdl2.ext.pixelaccess.pixels2d(renderer)

return window, pixels

def events_to_bw(events):
return events.int() * (255 << 16)
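A quick smoke test for the new helper layer, assuming it is run next to `example/sdl.py` with `pysdl2`, `pysdl2-dll`, and `torch` installed:

```python
import time
import torch
from sdl import create_sdl_surface, events_to_bw

window, pixels = create_sdl_surface(640, 480)
fake_events = torch.rand(640, 480) > 0.95  # sparse random "events"
pixels[:] = events_to_bw(fake_events)      # paint them into the window buffer
window.refresh()
time.sleep(2)                              # keep the window up briefly
window.close()
```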
12 changes: 8 additions & 4 deletions example/udp_client.py
@@ -1,7 +1,7 @@
import datetime
import time

import torch  # Torch is needed to import c10 (Core TENsor) context
from aestream import UDPInput

# Start a stream, receiving tensors of shape (640, 480)
@@ -10,20 +10,24 @@
with UDPInput((640, 480), "cpu", 4301) as stream:

# In this case, we read() every 500ms
interval = 0.5
t_0 = time.time()

out = []
# Loop forever
while True:
# When 500 ms have passed...
if t_0 + interval <= time.time():

# Grab a tensor of the events arriving during the past 500ms
frame = stream.read()
out.append(frame.clone())

# Reset the time so we're again counting to 500ms
t_0 = time.time()

# Sum the incoming events and print them along with the timestamp
time_string = datetime.datetime.fromtimestamp(t_0).time()
print(f"Frame at {time_string} with {frame.sum()} events")
print(f"Frame at {time_string} with {frame.sum()} events")
fs = torch.stack(out)
torch.save(fs, "f.dat")
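The updated client now records every frame and saves the stack to `f.dat`; note that, as committed, the `while True` loop needs a termination condition (or a `try`/`finally`) before the save is ever reached. Once the file exists, a sketch for inspecting it:

```python
import torch

fs = torch.load("f.dat")             # shape: (n_frames, 640, 480)
print(fs.shape, fs.sum(dim=(1, 2)))  # total events in each 500 ms frame
```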
47 changes: 47 additions & 0 deletions example/usb_edgedetection.py
@@ -0,0 +1,47 @@
# Import the deep learning library, PyTorch
import torch
# Import the spiking neural network library, Norse
import norse

# Import the DVS camera streaming library, AEstream
from aestream import DVSInput

# Initialize our canvas
from sdl import create_sdl_surface, events_to_bw
window, pixels = create_sdl_surface(640 * 3, 480)

# Create horizontal and vertical edge detectors
kernel_size = 9
gaussian = torch.sigmoid(torch.linspace(-10, 10, kernel_size + 1))
kernel = (gaussian.diff() - 0.14).repeat(kernel_size, 1)
kernels = torch.stack((kernel, kernel.T))
convolution = torch.nn.Conv2d(1, 2, kernel_size, padding=12, bias=False, dilation=3)
convolution.weight = torch.nn.Parameter(kernels.unsqueeze(1))

# Create Norse network
# - One refractory cell to inhibit pixels
# - One convolutional edge-detection layer
net = norse.torch.SequentialState(
norse.torch.LIFRefracCell(),
convolution,
)
state = None # Start with empty state

try:
# Start streaming from a DVS camera and keep the events on the CPU
with DVSInput((640, 480)) as stream:
while True: # Loop forever
# Read a (640, 480) tensor from the camera
tensor = stream.read()
# Run the tensor through the network, while updating the state
with torch.inference_mode():
filtered, state = net(tensor.view(1, 1, 640, 480), state)

# Render tensors
pixels[0:640] = events_to_bw(tensor) # Input events
pixels[640:640 * 2] = events_to_bw(filtered[0,0]) # First channel
pixels[640 * 2:640 * 3] = events_to_bw(filtered[0,1]) # Second channel
window.refresh()

finally:
window.close()
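A quick shape check clarifies the `padding=12, dilation=3` choice above: the dilated 9-tap kernel spans 3 * (9 - 1) + 1 = 25 pixels, so 12 pixels of padding on each side preserve the 640 x 480 resolution:

```python
import torch

conv = torch.nn.Conv2d(1, 2, 9, padding=12, bias=False, dilation=3)
out = conv(torch.zeros(1, 1, 640, 480))
print(out.shape)  # torch.Size([1, 2, 640, 480]): resolution preserved
```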
62 changes: 22 additions & 40 deletions example/usb_norse.py
@@ -1,51 +1,33 @@
import time

# Import the deep learning library, PyTorch
import torch
import norse.torch as snn
import matplotlib

matplotlib.use("Qt5Agg")
import matplotlib.animation as animation
import matplotlib.pyplot as plt
# Import the spiking neural network library, Norse
import norse

# Import the DVS camera streaming library, AEstream
from aestream import DVSInput

## Example modified from: https://matplotlib.org/stable/tutorials/advanced/blitting.html

# Initialize our canvas
fig, (ax1, ax2) = plt.subplots(1, 2)
image1 = ax1.imshow(torch.zeros(260, 346), cmap="gray", vmin=0, vmax=1)
image2 = ax2.imshow(torch.zeros(260, 346), cmap="gray", vmin=0, vmax=2)
plt.show(block=False)
plt.pause(0.1)
bg = fig.canvas.copy_from_bbox(fig.bbox)
ax1.draw_artist(image1)
ax2.draw_artist(image2)
fig.canvas.blit(fig.bbox)
from sdl import create_sdl_surface, events_to_bw
window, pixels = create_sdl_surface(640 * 2, 480)

# Initialize PyTorch network
net = snn.LICell().cuda()
state = None
# Create a simple Norse leaky integrator
net = norse.torch.LICell(p=norse.torch.LIParameters(tau_syn_inv=100))
state = None # Start with empty state

# Start streaming from a DVS camera on USB 2:2 and put them on the GPU
try:
with DVSInput(2, 2, (640, 480), device="cuda") as stream:
while True:
# Read a tensor (346, 260) tensor from the camera
tensor = stream.read()
# Start streaming from a DVS camera and put the events on the GPU
with DVSInput((640, 480), device="cuda") as stream:
while True: # Loop forever
# Read a (640, 480) tensor from the camera
tensor = stream.read().cpu()
# Run the tensor through the network, while updating the state
with torch.inference_mode():
filtered, state = net(tensor.view(1, 1, 346, 260), state)
filtered, state = net(tensor.view(1, 640, 480), state)

# Redraw figure
fig.canvas.restore_region(bg)
image1.set_data(tensor.T.cpu().numpy())
image2.set_data(filtered.squeeze().T.cpu().numpy())
ax1.draw_artist(image1)
ax2.draw_artist(image2)
fig.canvas.blit(fig.bbox)
fig.canvas.flush_events()
# Render tensors
pixels[0:640] = events_to_bw(tensor) # Input events
pixels[640:640 * 2] = events_to_bw(filtered[0]) # First channel
window.refresh()

# Pause to only loop 100 times per second
plt.pause(0.01)
except Exception as e:
print("Error", e)
finally:
window.close()
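For readers unfamiliar with `LICell`: it is a leaky integrator, so each event leaves a trace that decays over time instead of vanishing after one frame. A standalone sketch with Norse's default parameters:

```python
import torch
import norse

cell = norse.torch.LICell()  # leaky integrator with default parameters
state = None
trace = []
for t in range(100):
    spike = torch.ones(1) if t == 10 else torch.zeros(1)  # one spike at t = 10
    out, state = cell(spike, state)
    trace.append(out.item())
# trace rises just after t = 10 and then decays smoothly back towards zero
```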
16 changes: 7 additions & 9 deletions example/usb_pytorch.py
@@ -26,14 +26,12 @@
net = torch.nn.Conv2d(1, 1, 3, padding=1, bias=False)
normal = torch.distributions.Normal(0, 1)
gaussian = normal
net.weight = torch.nn.Parameter(torch.tensor([[[
[-1, -2, -1],
[0, 0, 0],
[1, 2, 1]
]]], dtype=torch.float32))

# Start streaming from a DVS camera on USB 2:7 and put them on the GPU
with DVSInput(2, 4, (640, 480), device="cpu") as stream:
net.weight = torch.nn.Parameter(
torch.tensor([[[[-1, -2, -1], [0, 0, 0], [1, 2, 1]]]], dtype=torch.float32)
)

# Start streaming from a DVS camera and put the events on the GPU
with DVSInput((640, 480), device="cuda") as stream:
try:
while True:
# Read a (640, 480) tensor from the camera
@@ -53,4 +51,4 @@
# Pause to only loop 10 times per second
plt.pause(0.01)
except Exception as e:
print("Error", e)
print("Error", e)
16 changes: 8 additions & 8 deletions example/usb_stream.py
@@ -1,16 +1,16 @@
import datetime
import time

import torch  # Torch is needed to import c10 (Core TENsor) context
from aestream import DVSInput

# Connect to a USB camera at address 2:3, receiving tensors of shape (340, 480)
# Connect to a USB camera, receiving tensors of shape (640, 480)
# By default, we send the tensors to the CPU
# The variable "stream" can now be `.read()` whenever a tensor is desired
with DVSInput(2, 4, (640, 480)) as stream:
# - if you have a GPU, try changing this to "cuda"
with DVSInput((640, 480), device="cpu") as stream:

# In this case, we read() every 500ms
interval = 0.5
# In this case, we read() every 100ms
interval = 0.1
t_0 = time.time()

# Loop forever
@@ -23,7 +23,7 @@

# Reset the time so we're again counting down the interval
t_0 = time.time()

# Sum the incoming events and print them along with the timestamp
time_string = datetime.datetime.fromtimestamp(t_0).time()
print(f"Frame at {time_string} with {frame.sum()} events")
print(f"Frame at {time_string} with {frame.sum()} events")
45 changes: 15 additions & 30 deletions example/usb_video.py
@@ -1,37 +1,22 @@
import time

import torch
import matplotlib

matplotlib.use("Qt5Agg")
import matplotlib.animation as animation
import matplotlib.pyplot as plt

import torch # Torch is needed to import c10 (Core TENsor) context
from aestream import DVSInput
import sdl

## Example modified from: https://matplotlib.org/stable/tutorials/advanced/blitting.html
# Define our camera resolution
resolution = (640, 480)

# Initialize our canvas
fig, ax = plt.subplots()
image = ax.imshow(torch.zeros(260, 346), cmap="gray", vmin=0, vmax=1)
plt.show(block=False)
plt.pause(0.1)
bg = fig.canvas.copy_from_bbox(fig.bbox)
ax.draw_artist(image)
fig.canvas.blit(fig.bbox)
window, pixels = sdl.create_sdl_surface(*resolution)

# Start streaming from a DVS camera on USB 2:3
with DVSInput(2, 2, (640, 480)) as stream:
# Start streaming from a DVS camera straight to the GPU
with DVSInput(resolution, device="cuda") as stream:
while True:
# Read a tensor (346, 260) tensor from the camera
tensor = stream.read()

# Redraw figure
fig.canvas.restore_region(bg)
image.set_data(tensor.T.numpy())
ax.draw_artist(image)
fig.canvas.blit(fig.bbox)
fig.canvas.flush_events()

# Pause to only loop 10 times per second
plt.pause(0.01)
# Read a (640, 480) tensor from the camera
tensor = stream.read().cpu()
# tensor = torch.randn(640, 480).float() + 2  # Debug: random input instead of the camera

# Render pixels
pixels[:] = sdl.events_to_bw(tensor)
window.refresh()
time.sleep(0.01)
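Note that `events_to_bw` in `sdl.py` writes only the red channel (`255 << 16`), so active pixels render red rather than white. A variant that lights all three channels (a hypothetical `events_to_white` helper reusing the `WHITE` constant; not part of this commit):

```python
from sdl import WHITE

def events_to_white(events):
    # Scale R, G, and B equally so events render as white rather than red
    return events.int() * WHITE
```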
4 changes: 2 additions & 2 deletions flake.nix
@@ -44,7 +44,7 @@
};
aestream = pkgs.stdenv.mkDerivation {
name = "aestream";
version = "0.2.0";
version = "0.3.0";
src = ./.;
nativeBuildInputs = [
pkgs.cmake
@@ -93,7 +93,7 @@
});
aestream-python = mach-nix.lib.${system}.buildPythonPackage {
pname = "aestream";
version = "0.2.0";
version = "0.3.0";
src = ./.;
requirements = python-requirements;

2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,3 +1,3 @@
ninja
numpy
torch>=1.9
torch>=1.10