Some fixes to the blender pipeline and docs (#213)
* Updated blender devops scripts

* Added debug listener to pulsar animation

* updated docs
Helveg committed Jan 26, 2021
1 parent e28ee66 commit d37c584
Showing 6 changed files with 85 additions and 26 deletions.
40 changes: 33 additions & 7 deletions bsb/blender/_mixin.py
@@ -139,9 +139,24 @@ def animate():
    pass


def print_debug(lvl, cat, msg, i=None, total=None):
    if cat.startswith("cell_"):
        return
    kwargs = {}
    if i is not None:
        kwargs["end"] = "\r"
    print(f"[{cat}] {msg}", **kwargs)


def devnull(*args, **kwargs):
    pass


def _pulsar(results, cells, **kwargs):
    import math

    cells = list(cells)

    # Frames per second
    fps = kwargs.get("fps", 60)
    # Milliseconds per second
@@ -152,6 +167,9 @@ def _pulsar(results, cells, **kwargs):
    spd = kwargs.get("spike_duration", 5)
    # Afterburner: fade out time after animation
    ab = kwargs.get("afterburn", 30)
    # Get the listener that deals with progress reports
    listener_name = kwargs.get("listener", "devnull")
    listener = globals().get(listener_name, devnull)
    # Spike width: number of frames of rising/falling edge of spike animation
    sw = math.ceil(spd / 2 / mpf)
    # Create the signal processor functions. They calculate cell intensity during anim.
@@ -161,18 +179,18 @@ def _pulsar(results, cells, **kwargs):
    # Set up compositor with a glare node.
    _pulsar_glare()
    # Retrieve cell activity from the given results
    cell_activity = _pulsar_cell_activity(cells, results)
    cell_activity = _pulsar_cell_activity(cells, results, listener)
    # Animate the cell keyframes
    _pulsar_animate(cells, cell_activity, mpf, sw, ab, cap, intensity)
    _pulsar_animate(cells, cell_activity, mpf, sw, ab, cap, intensity, listener)


animate.pulsar = _pulsar
_crowded_pulsars = ["granule_cell", "glomerulus"]


def _pulsar_animate(cells, cell_activity, mpf, sw, ab, cap, intensity):
def _pulsar_animate(cells, cell_activity, mpf, sw, ab, cap, intensity, listener):
    last_frame = 0
    for cell in cells:
    for i, cell in enumerate(cells):
        # Hardcoded granule cell solution, fix later.
        _min = 0.3 if cell.type.name not in _crowded_pulsars else 0.0
        spike_frames = (cell_activity[cell.id] / mpf).astype(int)
@@ -253,13 +271,21 @@ def intensity(i, min=0.3):
    return cap, intensity


def _pulsar_cell_activity(cells, results):
def _pulsar_cell_activity(cells, results, listener):
    cell_activity = {}
    for cell in cells:
    for i, cell in enumerate(cells):
        listener("debug", "cell", f"id: {cell.id}", i=i, total=len(cells))
        if str(cell.id) not in results:
            listener("warn", "cell_data_not_found", f"No data for {cell.id}")
            cell_activity[cell.id] = _np.empty(0)
            continue
        activity = results[str(cell.id)][()]
        if not len(activity):
            listener("info", "cell_silent", f"Cell {cell.id} does not fire.")
            cell_activity[cell.id] = _np.empty(0)
            continue
        cell_activity[cell.id] = activity = results[str(cell.id)][:, 1]
        listener("debug", "cell_activity", f"Cell {cell.id} fires {len(activity)} times.")
        cell_activity[cell.id] = results[str(cell.id)][:, 1]
    return cell_activity


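A minimal usage sketch of the new `listener` option introduced above (not part of the commit; the file names are the placeholders used in the docs, and `print_debug`/`devnull` are the two listeners defined in `_mixin.py`):

    import bpy, h5py, itertools
    import bsb.core

    network = bsb.core.from_hdf5("mynetwork.hdf5")
    network.for_blender().blend(bpy.context.scene, "scaffold")
    cells = itertools.chain(*(p.cells for p in network.get_populations().values()))
    with h5py.File("my_results.hdf5", "r") as f:
        # Pass the listener by name; unknown names fall back silently to `devnull`.
        network.animate.pulsar(f, cells, listener="print_debug")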
9 changes: 2 additions & 7 deletions devops/blender-pipe/jrender.slurm
@@ -1,5 +1,5 @@
#!/bin/bash -l
#SBATCH --job-name="Blender"
#SBATCH --job-name="Blender render"
#SBATCH --mail-type=ALL
#SBATCH --time=01:00:00
#SBATCH --nodes=25
@@ -12,10 +12,5 @@

export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK

module swap gcc/8.3.0 gcc/9.3.0
module load daint-mc
module load cray-python/3.8.2.1
module load PyExtensions/python3-CrayGNU-20.08
module load h5py/2.10.0-CrayGNU-20.08-python3-parallel

source load_blender.sh
srun python render.py $1 $2
9 changes: 4 additions & 5 deletions devops/blender-pipe/jsequence.slurm
@@ -1,8 +1,7 @@
#!/bin/bash -l
#SBATCH --job-name="Render Blender"
#SBATCH --job-name="Blender sequence"
#SBATCH --mail-type=ALL
#SBATCH --mail-user=robin.deschepper93@gmail.com
#SBATCH --time=00:15:00
#SBATCH --time=00:45:00
#SBATCH --nodes=1
#SBATCH --ntasks-per-core=1
#SBATCH --ntasks-per-node=1
@@ -13,5 +12,5 @@

export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK

source $HOME/load_blender.sh
srun blender-2.91.0-cacd57b67a15-linux64/blender -b $1 -E CYCLES -P render.py -- $2
source load_blender.sh
srun blender -b $1 -E CYCLES -P sequence.py -- $2
10 changes: 10 additions & 0 deletions devops/blender-pipe/load_blender.sh
@@ -0,0 +1,10 @@
module load daint-mc Mesa/.18.3.3-CrayGNU-20.08
module load gcc/9.3.0
# Multicore stack
module load daint-mc
# Python 3 & mpi4py
module load cray-python/3.8.2.1
# Cython
module load PyExtensions/python3-CrayGNU-20.08
# h5py
module load h5py/2.10.0-CrayGNU-20.08-python3-parallel
7 changes: 3 additions & 4 deletions devops/blender-pipe/render.py
@@ -15,23 +15,22 @@
print("pathinfo:", op.parts, len(op.parts), op.parts[:-1])
if len(op.parts) > 1:
    Path(os.path.join(*op.parts[:-1])).mkdir(parents=True, exist_ok=True)
op.mkdir(exist_ok=False)

print(f"Starting blender {e} job")

subprocess.check_call(
    [
        "blender-2.90.0-linux64/blender",
        "blender",
        "-b",
        f,
        "-E",
        e,
        "-o",
        o,
        "-s",
        rank,
        str(rank),
        "-j",
        size,
        str(size),
        "-a",
    ]
)
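Not part of the commit, but a sketch of how this call splits the work, assuming `rank` and `size` come from the MPI communicator (mpi4py is among the modules loaded by `load_blender.sh`): Blender's `-s` flag sets the start frame and `-j` the frame step, so each rank renders every `size`-th frame starting at its own rank.

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank, size = comm.Get_rank(), comm.Get_size()
    end_frame = 250  # example value; the real end frame lives in the blendfile
    # Frames this rank would render with `-s rank -j size -a`:
    my_frames = list(range(rank, end_frame + 1, size))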
36 changes: 33 additions & 3 deletions docs/source/guides/blender.rst
@@ -55,13 +55,13 @@ you want or you can continue to use some of the BSB blendins:
import bpy, bsb.core, h5py, itertools
network = bsb.core.from_hdf5("mynetwork.hdf5")
# Blend the network into the current scene
network.for_blender().blend("scaffold", bpy.context.scene)
# Blend the network into the current scene under the name `scaffold`
network.for_blender().blend(bpy.context.scene, "scaffold")
# Load all cell types
network.load_populations()
# Or, if you'd like to use the populations:
populations = network.get_populations()
cells = itertools.chain(p.cells for p in populations)
cells = itertools.chain(*(p.cells for p in populations.values()))
# Use the 'pulsar' animation to animate all cells with the simulation results
with h5py.File("my_results.hdf5", "r") as f:
    network.animate.pulsar(f, cells)
@@ -82,3 +82,33 @@ you want or you can continue to use some of the BSB blendins:
unpopulated versions of your Blender files, run the blendin script, save as another
file, render it and make the required changes to the unpopulated version, repeating the
process. Optimizations are likely to be added in the future.

Blender HPC workflow
====================

The ``devops/blender-pipe`` folder contains scripts that facilitate rendering and
sequencing BSB blendfiles on HPC systems. Copy them together into a directory on the HPC
system and make sure that the ``blender`` command opens Blender. The pipeline consists of
2 steps: ``rendering`` each frame in parallel and ``sequencing`` the rendered images into
a video.
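
One way to make the ``blender`` command available (an assumption, adapt it to your own
install) is to put an unpacked Blender release on your ``PATH``, for example from
``load_blender.sh`` or your shell profile:

.. code-block:: bash

    # Hypothetical location; point this at wherever Blender is unpacked on the system
    export PATH="$HOME/blender-2.91.0-linux64:$PATH"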

jrender.slurm
-------------

The render jobscript uses ``render.py`` to invoke Blender. Each Blender process is
tasked with rendering its own share of the frames. ``jrender.slurm`` takes 2 arguments,
the blendfile and the output image folder:

.. code-block:: bash

    sbatch jrender.slurm my_file.blend my_file_imgs

jsequence.slurm
---------------

The sequencing jobscript stitches together the rendered frames into a video. This has to
be done in serial on a single node. It takes the blendfile and image folder as arguments:

.. code-block:: bash

    sbatch jsequence.slurm my_file.blend my_file_imgs
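
The two jobs can be chained so that sequencing only starts after the render job has
finished successfully; a sketch using standard Slurm options:

.. code-block:: bash

    # Submit the render job, capture its job id and make the sequencing job depend on it
    jid=$(sbatch --parsable jrender.slurm my_file.blend my_file_imgs)
    sbatch --dependency=afterok:$jid jsequence.slurm my_file.blend my_file_imgs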

