Skip to content

Commit fa85d63

Browse files
committed
passing the gated_attn flag through to all attention layers (instead of hard-coding gated_attn=False)
1 parent dddb46f commit fa85d63

1 file changed

Lines changed: 4 additions & 4 deletions

File tree

src/maxdiffusion/models/ltx2/transformer_ltx2.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -145,7 +145,7 @@ def __init__(
145145
rope_type=rope_type,
146146
flash_block_sizes=flash_block_sizes,
147147
flash_min_seq_length=flash_min_seq_length,
148-
gated_attn=False,
148+
gated_attn=gated_attn,
149149
)
150150

151151
self.audio_norm1 = nnx.RMSNorm(
@@ -172,7 +172,7 @@ def __init__(
172172
rope_type=rope_type,
173173
flash_block_sizes=flash_block_sizes,
174174
flash_min_seq_length=flash_min_seq_length,
175-
gated_attn=False,
175+
gated_attn=gated_attn,
176176
)
177177

178178
# 2. Prompt Cross-Attention
@@ -200,7 +200,7 @@ def __init__(
200200
attention_kernel=self.attention_kernel,
201201
rope_type=rope_type,
202202
flash_block_sizes=flash_block_sizes,
203-
gated_attn=False,
203+
gated_attn=gated_attn,
204204
)
205205

206206
self.audio_norm2 = nnx.RMSNorm(
@@ -228,7 +228,7 @@ def __init__(
228228
rope_type=rope_type,
229229
flash_block_sizes=flash_block_sizes,
230230
flash_min_seq_length=flash_min_seq_length,
231-
gated_attn=False,
231+
gated_attn=gated_attn,
232232
)
233233

234234
# 3. Audio-to-Video (a2v) and Video-to-Audio (v2a) Cross-Attention

0 commit comments

Comments
 (0)