
Mojo function

flash_attention_dispatch

flash_attention_dispatch[
    k_t: MHAOperand,
    v_t: MHAOperand,
    mask_t: MHAMask,
    score_mod_t: ScoreModTrait,
    dtype: DType,
    q_layout: Layout,
    //,
    kv_num_heads: Int,
    use_score_mod: Bool = False,
    config: MHAConfig = MHAConfig(
        dtype,
        UInt(Int.__init__[IntTuple[ComptimeOrigin]](q_layout.shape[(q_layout.rank() - 2)])),
        UInt(Int.__init__[IntTuple[ComptimeOrigin]](q_layout.shape[(q_layout.rank() - 1)])),
        OptionalReg[UInt](None),
        OptionalReg[UInt](None),
        OptionalReg[UInt](None),
        OptionalReg[UInt](None),
        OptionalReg[UInt](None),
        4,
        1,
        FlashAttentionAlgorithm(-1),
        OptionalReg[UInt](None),
        TensorMapSwizzle(3),
    ),
    ragged: Bool = False,
    sink: Bool = False,
    _is_flash_attention_applicable: Bool = True,
    _is_cache_length_accurate: Bool = False,
    _use_valid_length: Bool = True,
    _padded_ndbuffer: Bool = False,
    decoding_warp_split_k: Bool = False,
](
    output: LayoutTensor[dtype, layout, origin, element_layout=element_layout, layout_int_type=layout_int_type, linear_idx_type=linear_idx_type, masked=masked, alignment=alignment],
    q: LayoutTensor[dtype, q_layout, origin, element_layout=element_layout, layout_int_type=layout_int_type, linear_idx_type=linear_idx_type, masked=masked, alignment=alignment],
    k: k_t,
    v: v_t,
    mask_functor: mask_t,
    score_mod_functor: score_mod_t,
    valid_length: ManagedTensorSlice[io_spec, static_spec=static_spec],
    max_prompt_len: Int,
    max_cache_valid_length: Int,
    scale: Float32,
    is_token_generation: Bool,
    ctx: DeviceContext,
    kv_input_row_offsets: OptionalReg[LayoutTensor[DType.uint32, Layout.row_major(-1), MutableAnyOrigin]] = None,
    num_partitions: OptionalReg[Int] = None,
    sink_weights: OptionalReg[LayoutTensor[dtype, Layout.row_major(-1), MutableAnyOrigin]] = None,
)
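Because the parameters declared before `//` (k_t, v_t, mask_t, score_mod_t, dtype, and q_layout) are infer-only, the compiler deduces them from the runtime arguments; a caller typically binds only kv_num_heads and any optional flags explicitly. The following is a minimal, hypothetical call-site sketch, not a definitive usage: it assumes output, q, k, v, valid_length, and ctx were constructed elsewhere with the types required by the signature, and CausalMask and IdentityScoreMod are assumed names for the mask and score-mod functors. Only the parameter and argument names come from the signature above.

    from math import sqrt

    # Scale by 1/sqrt(head_dim), the conventional attention scaling.
    alias depth = 128
    var scale = Float32(1.0) / sqrt(Float32(depth))

    # Infer-only parameters are deduced from the arguments; only
    # kv_num_heads must be supplied explicitly here.
    flash_attention_dispatch[kv_num_heads=8](
        output,              # LayoutTensor receiving the attention result
        q,                   # LayoutTensor of queries with layout q_layout
        k,                   # MHAOperand over the key cache
        v,                   # MHAOperand over the value cache
        CausalMask(),        # mask functor (assumed name)
        IdentityScoreMod(),  # score-mod functor (assumed name)
        valid_length,        # per-sequence valid lengths
        max_prompt_len,
        max_cache_valid_length,
        scale,
        is_token_generation=False,  # prefill path; True selects decoding
        ctx=ctx,                    # DeviceContext for the target GPU
    )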
