Mojo function

mha_gpu_naive
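Runs a naive multi-head attention kernel on the GPU. Three overloads are provided: one that reads K and V through the generic `MHAOperand` interface, one that takes plain `LayoutTensor` K and V together with a materialized mask tensor, and one that reads K and V from a `KVCacheT`-backed cache. The `MHAOperand` and `KVCacheT` overloads apply masking through an `MHAMask` functor and take per-sequence lengths via `valid_length`, with `ragged = True` selecting variable-length batch handling; `group` relates query heads to KV heads for grouped-query attention.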

```mojo
mha_gpu_naive[
    output_type: DType,
    k_t: MHAOperand,
    v_t: MHAOperand,
    mask_t: MHAMask,
    //,
    ragged: Bool = False,
    sink: Bool = False,
    _use_valid_length: Bool = False,
    _is_cache_length_accurate: Bool = False,
](
    q: LayoutTensor[dtype, layout, origin, element_layout=element_layout, layout_int_type=layout_int_type, linear_idx_type=linear_idx_type, masked=masked, alignment=alignment],
    k: k_t,
    v: v_t,
    mask_functor: mask_t,
    output: LayoutTensor[output_type, layout, origin, element_layout=element_layout, layout_int_type=layout_int_type, linear_idx_type=linear_idx_type, masked=masked, alignment=alignment],
    valid_length: ManagedTensorSlice[io_spec, static_spec=static_spec],
    scale: Float32,
    batch_size: Int,
    max_prompt_len: Int,
    max_cache_size: Int,
    num_heads: Int,
    depth: Int,
    group: Int,
    ctx: DeviceContext,
    sink_weights: OptionalReg[LayoutTensor[dtype, Layout.row_major(-1), MutableAnyOrigin]] = None,
)
```

```mojo
mha_gpu_naive[
    q_type: DType,
    k_type: DType,
    v_type: DType,
    output_type: DType,
    mask_type: DType,
    //,
    sink: Bool = False,
](
    q: LayoutTensor[q_type, layout, origin, element_layout=element_layout, layout_int_type=layout_int_type, linear_idx_type=linear_idx_type, masked=masked, alignment=alignment],
    k: LayoutTensor[k_type, layout, origin, element_layout=element_layout, layout_int_type=layout_int_type, linear_idx_type=linear_idx_type, masked=masked, alignment=alignment],
    v: LayoutTensor[v_type, layout, origin, element_layout=element_layout, layout_int_type=layout_int_type, linear_idx_type=linear_idx_type, masked=masked, alignment=alignment],
    mask: LayoutTensor[mask_type, layout, origin, element_layout=element_layout, layout_int_type=layout_int_type, linear_idx_type=linear_idx_type, masked=masked, alignment=alignment],
    output: LayoutTensor[output_type, layout, origin, element_layout=element_layout, layout_int_type=layout_int_type, linear_idx_type=linear_idx_type, masked=masked, alignment=alignment],
    scale: Float32,
    batch_size: Int,
    seq_len: Int,
    num_keys: Int,
    num_heads: Int,
    depth: Int,
    group: Int,
    ctx: DeviceContext,
    sink_weights: OptionalReg[LayoutTensor[q_type, Layout.row_major(-1), MutableAnyOrigin]] = None,
)
```
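As a usage illustration for the overload above (plain tensors plus a materialized mask), here is a minimal sketch. It assumes an `nn.mha` import path, a `[batch, seq, heads, depth]` layout for `q`/`k`/`v`/`output`, and a `[seq_len, num_keys]` mask shape; none of these conventions are stated on this page, so verify them against the kernel source before relying on them.

```mojo
# Hypothetical sketch, not from the documentation: shapes, import path,
# and tensor construction are assumptions to be checked against nn.mha.
from math import sqrt

from gpu.host import DeviceContext
from layout import Layout, LayoutTensor
from nn.mha import mha_gpu_naive  # assumed import path

def main():
    alias dtype = DType.float32
    alias batch_size = 1
    alias seq_len = 128
    alias num_keys = 128  # == seq_len for self-attention
    alias num_heads = 8
    alias depth = 64      # per-head dimension
    alias group = 1       # 1 query head per KV head, i.e. standard MHA

    # Assumed shapes: q/k/v/output are [batch, seq, heads, depth],
    # the mask is [seq_len, num_keys] (e.g. 0 to keep, -inf to drop).
    alias qkv_layout = Layout.row_major(batch_size, seq_len, num_heads, depth)
    alias mask_layout = Layout.row_major(seq_len, num_keys)

    var ctx = DeviceContext()
    # Zero-filled placeholders; real code would copy q/k/v/mask data in.
    var q_buf = ctx.enqueue_create_buffer[dtype](qkv_layout.size()).enqueue_fill(0)
    var k_buf = ctx.enqueue_create_buffer[dtype](qkv_layout.size()).enqueue_fill(0)
    var v_buf = ctx.enqueue_create_buffer[dtype](qkv_layout.size()).enqueue_fill(0)
    var mask_buf = ctx.enqueue_create_buffer[dtype](mask_layout.size()).enqueue_fill(0)
    var out_buf = ctx.enqueue_create_buffer[dtype](qkv_layout.size()).enqueue_fill(0)

    # Wrap the device buffers as LayoutTensors for the kernel call.
    var q = LayoutTensor[dtype, qkv_layout](q_buf.unsafe_ptr())
    var k = LayoutTensor[dtype, qkv_layout](k_buf.unsafe_ptr())
    var v = LayoutTensor[dtype, qkv_layout](v_buf.unsafe_ptr())
    var mask = LayoutTensor[dtype, mask_layout](mask_buf.unsafe_ptr())
    var output = LayoutTensor[mut=True, dtype, qkv_layout](out_buf.unsafe_ptr())

    # Standard attention scaling: 1 / sqrt(head_dim).
    var scale = Float32(1.0) / sqrt(Float32(depth))

    mha_gpu_naive(
        q, k, v, mask, output, scale,
        batch_size, seq_len, num_keys, num_heads, depth, group, ctx,
    )
    ctx.synchronize()
```

Here `sink` stays at its `False` default and `sink_weights` at `None`; the cache-backed overloads follow the same call shape but pass cache objects for `k`/`v` and a `valid_length` slice instead of a mask tensor.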

```mojo
mha_gpu_naive[
    q_type: DType,
    output_type: DType,
    cache_t: KVCacheT,
    mask_t: MHAMask,
    //,
    ragged: Bool = False,
    sink: Bool = False,
](
    q: LayoutTensor[q_type, layout, origin, element_layout=element_layout, layout_int_type=layout_int_type, linear_idx_type=linear_idx_type, masked=masked, alignment=alignment],
    k: cache_t,
    v: cache_t,
    mask_functor: mask_t,
    output: LayoutTensor[output_type, layout, origin, element_layout=element_layout, layout_int_type=layout_int_type, linear_idx_type=linear_idx_type, masked=masked, alignment=alignment],
    valid_length: ManagedTensorSlice[io_spec, static_spec=static_spec],
    scale: Float32,
    batch_size: Int,
    max_prompt_len: Int,
    max_cache_size: Int,
    num_heads: Int,
    depth: Int,
    group: Int,
    ctx: DeviceContext,
    sink_weights: OptionalReg[LayoutTensor[q_type, Layout.row_major(-1), MutableAnyOrigin]] = None,
)
```
