Mojo function: `flare_mla_decoding_dispatch`
flare_mla_decoding_dispatch[k_t: MHAOperand, mask_t: MHAMask, dtype: DType, q_layout: Layout, //, kv_num_heads: Int, config: MHAConfig[dtype] = MHAConfig(SIMD(Int.__init__[IntTuple](q_layout.shape[(q_layout.rank() - 2)])), SIMD(Int.__init__[IntTuple](q_layout.shape[(q_layout.rank() - 1)])), Optional(None), Optional(None), Optional(None), Optional(None), Optional(None), 4, 1, FlashAttentionAlgorithm(-1), TensorMapSwizzle.SWIZZLE_128B), ragged: Bool = False, _is_cache_length_accurate: Bool = False, _use_valid_length: Bool = True, decoding_warp_split_k: Bool = False, per_token_scale_rope_aware: Bool = False](output: LayoutTensor[output.dtype, output.layout, output.origin, element_layout=output.element_layout, layout_int_type=output.layout_int_type, linear_idx_type=output.linear_idx_type, masked=output.masked, alignment=output.alignment], q: LayoutTensor[dtype, q_layout, q.origin, element_layout=q.element_layout, layout_int_type=q.layout_int_type, linear_idx_type=q.linear_idx_type, masked=q.masked, alignment=q.alignment], k: k_t, mask_functor: mask_t, valid_length: LayoutTensor[DType.uint32, valid_length.layout, valid_length.origin, element_layout=valid_length.element_layout, layout_int_type=valid_length.layout_int_type, linear_idx_type=valid_length.linear_idx_type, masked=valid_length.masked, alignment=valid_length.alignment], max_prompt_len: Int, max_cache_valid_length: Int, scale: Float32, ctx: DeviceContext, scalar_args_buf: LayoutTensor[DType.int64, Layout.row_major(VariadicList(3)), MutAnyOrigin], kv_input_row_offsets: OptionalReg[LayoutTensor[DType.uint32, Layout.row_major(VariadicList(-1)), ImmutAnyOrigin]] = None, num_partitions: Optional[Int] = None, q_scale_ptr: UnsafePointer[Float32, MutAnyOrigin] = UnsafePointer())
Was this page helpful?
Thank you! We'll create more content like this.
Thank you for helping us improve!