30 changes: 0 additions & 30 deletions docs/attention_blocks_flowchart.md

This file was deleted.

Binary file removed docs/attention_blocks_flowchart.png
Binary file not shown.
29 changes: 10 additions & 19 deletions src/maxdiffusion/max_utils.py
@@ -501,26 +501,17 @@ def get_flash_block_sizes(config):
"""Create custom flash attention BlockSizes."""
flash_block_sizes = None
if len(config.flash_block_sizes.keys()) > 0:
attention_is_tokamax = "tokamax" in config.attention
user_block_sizes:Dict[str, int] = config.flash_block_sizes
if attention_is_tokamax:
max_logging.log("Tokamax kernel specified, Note: Tokamax only supports fused backward kernel."
"Hence following flash block properties specified will be ignored:"
f"block_q: {user_block_sizes['block_q']},"
f"block_q_dq: {user_block_sizes.get('block_q_dq')},"
f"block_kv_dq: {user_block_sizes.get('block_kv_dq')},"
f"use_fused_bwd_kernel: {user_block_sizes.get('use_fused_bwd_kernel')}"
)
use_fused_bwd_kernel = config.flash_block_sizes.get("use_fused_bwd_kernel", False)
flash_block_sizes = splash_attention_kernel.BlockSizes(
block_q=user_block_sizes.get("block_q_dkv", user_block_sizes["block_kv"]) if attention_is_tokamax else user_block_sizes["block_q"],
block_kv_compute=user_block_sizes["block_kv_compute"],
block_kv=user_block_sizes["block_kv"],
block_q_dkv=user_block_sizes["block_q_dkv"],
block_kv_dkv=user_block_sizes["block_kv_dkv"],
block_kv_dkv_compute=user_block_sizes["block_kv_dkv_compute"],
block_q_dq=None if attention_is_tokamax else value_or_none(user_block_sizes, "block_q_dq"),
block_kv_dq=None if attention_is_tokamax else value_or_none(user_block_sizes, "block_kv_dq"),
use_fused_bwd_kernel=True if attention_is_tokamax else value_or_none(user_block_sizes, "use_fused_bwd_kernel"),
block_q=config.flash_block_sizes["block_q"],
block_kv_compute=config.flash_block_sizes["block_kv_compute"],
block_kv=config.flash_block_sizes["block_kv"],
block_q_dkv=config.flash_block_sizes["block_q_dkv"],
block_kv_dkv=config.flash_block_sizes["block_kv_dkv"],
block_kv_dkv_compute=config.flash_block_sizes["block_kv_dkv_compute"],
block_q_dq=value_or_none(config.flash_block_sizes, "block_q_dq"),
block_kv_dq=value_or_none(config.flash_block_sizes, "block_kv_dq"),
use_fused_bwd_kernel=value_or_none(config.flash_block_sizes, "use_fused_bwd_kernel"),
)
return flash_block_sizes
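
A minimal sketch of the mapping the simplified `get_flash_block_sizes` performs, assuming a hypothetical `flash_block_sizes` entry: the required keys are read directly, and the optional backward-pass keys fall back to `None` via `value_or_none` when absent. The numeric values and the `example_*` names below are illustrative assumptions, not taken from any shipped maxdiffusion config, and the `splash_attention_kernel` import path is the one used by current JAX releases and may differ across versions.

```python
# Minimal sketch, assuming a hypothetical flash_block_sizes mapping.
# Values are illustrative placeholders, not from a real maxdiffusion config.
from jax.experimental.pallas.ops.tpu.splash_attention import splash_attention_kernel

example_flash_block_sizes = {
    "block_q": 512,
    "block_kv_compute": 512,
    "block_kv": 512,
    "block_q_dkv": 512,
    "block_kv_dkv": 512,
    "block_kv_dkv_compute": 512,
    "block_q_dq": 512,
    "block_kv_dq": 512,
    # "use_fused_bwd_kernel" is optional; in maxdiffusion, value_or_none()
    # maps a missing key to None.
}

block_sizes = splash_attention_kernel.BlockSizes(
    block_q=example_flash_block_sizes["block_q"],
    block_kv_compute=example_flash_block_sizes["block_kv_compute"],
    block_kv=example_flash_block_sizes["block_kv"],
    block_q_dkv=example_flash_block_sizes["block_q_dkv"],
    block_kv_dkv=example_flash_block_sizes["block_kv_dkv"],
    block_kv_dkv_compute=example_flash_block_sizes["block_kv_dkv_compute"],
    block_q_dq=example_flash_block_sizes.get("block_q_dq"),    # None when omitted
    block_kv_dq=example_flash_block_sizes.get("block_kv_dq"),  # None when omitted
    use_fused_bwd_kernel=example_flash_block_sizes.get("use_fused_bwd_kernel", False),
)
```

In maxdiffusion itself the same keys come from `config.flash_block_sizes`, so the `None` defaults shown here mirror what `value_or_none` produces for missing optional keys.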

7 changes: 3 additions & 4 deletions src/maxdiffusion/models/attention_flax.py
@@ -234,15 +234,14 @@ def _tpu_flash_attention(
if flash_block_sizes and key.shape[1] == query.shape[1]:
block_sizes = flash_block_sizes
else:
block_size_q = flash_block_sizes.block_q if flash_block_sizes else q_max_block_size
block_sizes = splash_attention_kernel.BlockSizes(
block_q=block_size_q,
block_q=min(q_max_block_size, query.shape[2]),
block_kv_compute=min(kv_max_block_size, key.shape[2]),
block_kv=min(kv_max_block_size, key.shape[2]),
block_q_dkv=block_size_q,
block_q_dkv=min(q_max_block_size, query.shape[2]),
block_kv_dkv=min(kv_max_block_size, key.shape[2]),
block_kv_dkv_compute=min(kv_max_block_size, query.shape[2]),
block_q_dq=None if attention_kernel == "tokamax_flash" else block_size_q,
block_q_dq=None if attention_kernel == "tokamax_flash" else min(q_max_block_size, query.shape[2]),
block_kv_dq=None if attention_kernel == "tokamax_flash" else min(kv_max_block_size, query.shape[2]),
use_fused_bwd_kernel=True if attention_kernel == "tokamax_flash" else False,
)
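
As a rough illustration of the fallback branch above (used when no custom `flash_block_sizes` apply, for example cross attention where key and query lengths differ), every block dimension is capped at the smaller of the platform maximum and the actual sequence length. The shapes and maxima below are assumptions for illustration only, with axis 2 taken to be the sequence axis as the `min(...)` expressions suggest.

```python
# Hypothetical numbers: q_max_block_size / kv_max_block_size are assumed
# per-platform defaults, and the sequence lengths are made up.
q_max_block_size, kv_max_block_size = 512, 1024
q_seq_len, kv_seq_len = 75_600, 512  # e.g. long video-token queries attending to text tokens

block_q = min(q_max_block_size, q_seq_len)     # 512: capped by the maximum block size
block_kv = min(kv_max_block_size, kv_seq_len)  # 512: capped by the actual KV length
print(block_q, block_kv)                       # -> 512 512
```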
114 changes: 57 additions & 57 deletions src/maxdiffusion/tests/wan_transformer_test.py
@@ -179,69 +179,69 @@ def test_wan_block(self):
dummy_encoder_hidden_states = jnp.ones((batch_size, 512, dim))

dummy_temb = jnp.ones((batch_size, 6, dim))

wan_block = WanTransformerBlock(
rngs=rngs,
dim=dim,
ffn_dim=ffn_dim,
num_heads=num_heads,
qk_norm=qk_norm,
cross_attn_norm=cross_attn_norm,
eps=eps,
attention="flash",
mesh=mesh,
flash_block_sizes=flash_block_sizes,
)
with mesh:
with mesh, nn_partitioning.axis_rules(self.config.logical_axis_rules):
wan_block = WanTransformerBlock(
rngs=rngs,
dim=dim,
ffn_dim=ffn_dim,
num_heads=num_heads,
qk_norm=qk_norm,
cross_attn_norm=cross_attn_norm,
eps=eps,
attention="flash",
mesh=mesh,
flash_block_sizes=flash_block_sizes,
)
dummy_output = wan_block(dummy_hidden_states, dummy_encoder_hidden_states, dummy_temb, dummy_rotary_emb)
assert dummy_output.shape == dummy_hidden_states.shape

def test_wan_attention(self):
for attention_kernel in ["flash", "tokamax_flash"]:
pyconfig.initialize(
[
None,
os.path.join(THIS_DIR, "..", "configs", "base_wan_14b.yml"),
f"attention={attention_kernel}"
],
unittest=True
pyconfig.initialize(
[
None,
os.path.join(THIS_DIR, "..", "configs", "base_wan_14b.yml"),
],
unittest=True,
)
config = pyconfig.config

batch_size = 1
channels = 16
frames = 21
height = 90
width = 160
hidden_states_shape = (batch_size, frames, height, width, channels)
dummy_hidden_states = jnp.ones(hidden_states_shape)
wan_rot_embed = WanRotaryPosEmbed(attention_head_dim=128, patch_size=[1, 2, 2], max_seq_len=1024)
dummy_rotary_emb = wan_rot_embed(dummy_hidden_states)

key = jax.random.key(0)
rngs = nnx.Rngs(key)
devices_array = create_device_mesh(config)

flash_block_sizes = get_flash_block_sizes(config)

mesh = Mesh(devices_array, config.mesh_axes)
batch_size = 1
query_dim = 5120
with mesh, nn_partitioning.axis_rules(self.config.logical_axis_rules):
attention = FlaxWanAttention(
rngs=rngs,
query_dim=query_dim,
heads=40,
dim_head=128,
attention_kernel="flash",
mesh=mesh,
flash_block_sizes=flash_block_sizes,
)
config = pyconfig.config
batch_size = 1
channels = 16
frames = 21
height = 90
width = 160
hidden_states_shape = (batch_size, frames, height, width, channels)
dummy_hidden_states = jnp.ones(hidden_states_shape)
wan_rot_embed = WanRotaryPosEmbed(attention_head_dim=128, patch_size=[1, 2, 2], max_seq_len=1024)
dummy_rotary_emb = wan_rot_embed(dummy_hidden_states)

key = jax.random.key(0)
rngs = nnx.Rngs(key)
devices_array = create_device_mesh(config)
mesh = Mesh(devices_array, config.mesh_axes)
batch_size = 1
query_dim = 5120
with mesh, nn_partitioning.axis_rules(config.logical_axis_rules):
flash_block_sizes = get_flash_block_sizes(config)
attention = FlaxWanAttention(
rngs=rngs,
query_dim=query_dim,
heads=40,
dim_head=128,
attention_kernel=attention_kernel,
mesh=mesh,
flash_block_sizes=flash_block_sizes,
)
dummy_hidden_states_shape = (batch_size, 75600, query_dim)
dummy_hidden_states_shape = (batch_size, 75600, query_dim)

dummy_hidden_states = jnp.ones(dummy_hidden_states_shape)
dummy_encoder_hidden_states = jnp.ones(dummy_hidden_states_shape)
dummy_output = attention(
hidden_states=dummy_hidden_states, encoder_hidden_states=dummy_encoder_hidden_states, rotary_emb=dummy_rotary_emb
)
assert dummy_output.shape == dummy_hidden_states_shape
dummy_hidden_states = jnp.ones(dummy_hidden_states_shape)
dummy_encoder_hidden_states = jnp.ones(dummy_hidden_states_shape)
dummy_output = attention(
hidden_states=dummy_hidden_states, encoder_hidden_states=dummy_encoder_hidden_states, rotary_emb=dummy_rotary_emb
)
assert dummy_output.shape == dummy_hidden_states_shape

# dot product
try: