Mojo function

naive_blockwise_scaled_fp8_grouped_matmul

naive_blockwise_scaled_fp8_grouped_matmul[
    c_type: DType,
    a_type: DType,
    b_type: DType,
    a_scales_type: DType,
    b_scales_type: DType,
    a_offsets_type: DType,
    expert_ids_type: DType,
    c_layout: Layout,
    a_layout: Layout,
    b_layout: Layout,
    a_scale_layout: Layout,
    b_scale_layout: Layout,
    a_offsets_layout: Layout,
    expert_ids_layout: Layout,
    //,
    BLOCK_DIM_N: Int = 32,
    BLOCK_DIM_M: Int = 16,
    transpose_b: Bool = True,
    scales_granularity_mnk: OptionalReg[IndexList[3]] = None,
    elementwise_lambda_fn: OptionalReg[fn[dtype: DType, width: Int, *, alignment: Int = 1](IndexList[2], SIMD[dtype, width]) capturing -> None] = None,
](
    c: LayoutTensor[c_type, c_layout, MutableAnyOrigin],
    a: LayoutTensor[a_type, a_layout, MutableAnyOrigin],
    b: LayoutTensor[b_type, b_layout, MutableAnyOrigin],
    a_scales: LayoutTensor[a_scales_type, a_scale_layout, MutableAnyOrigin],
    b_scales: LayoutTensor[b_scales_type, b_scale_layout, MutableAnyOrigin],
    a_offsets: LayoutTensor[a_offsets_type, a_offsets_layout, MutableAnyOrigin],
    expert_ids: LayoutTensor[expert_ids_type, expert_ids_layout, MutableAnyOrigin],
    max_num_tokens_per_expert: Int,
    num_active_experts: Int,
    ctx: DeviceContext,
)
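The parameters before the `//` marker are infer-only: the element types and layouts are deduced from the tensor arguments, so a caller only needs to spell out the tunable parameters (`BLOCK_DIM_N`, `BLOCK_DIM_M`, `transpose_b`, and the optional scale granularity and epilogue lambda). The sketch below is not from this page; it is a minimal illustration of how such a call might be wrapped, assuming an import path of `linalg.grouped_matmul` and illustrative dtype choices (FP8 `e4m3` inputs, `float32` block scales, `bfloat16` output, `uint32` token offsets, `int32` expert IDs). Verify the module path and dtypes against your MAX/Mojo version.

```mojo
from gpu.host import DeviceContext
from layout import Layout, LayoutTensor

# Assumed import path for the kernel; adjust to where it lives in your build.
from linalg.grouped_matmul import naive_blockwise_scaled_fp8_grouped_matmul


# Hypothetical wrapper showing how the runtime arguments line up and how
# the infer-only dtype/layout parameters are picked up from the tensors.
fn run_grouped_matmul[
    c_layout: Layout,
    a_layout: Layout,
    b_layout: Layout,
    a_scale_layout: Layout,
    b_scale_layout: Layout,
    a_offsets_layout: Layout,
    expert_ids_layout: Layout,
](
    c: LayoutTensor[DType.bfloat16, c_layout, MutableAnyOrigin],
    a: LayoutTensor[DType.float8_e4m3fn, a_layout, MutableAnyOrigin],
    b: LayoutTensor[DType.float8_e4m3fn, b_layout, MutableAnyOrigin],
    a_scales: LayoutTensor[DType.float32, a_scale_layout, MutableAnyOrigin],
    b_scales: LayoutTensor[DType.float32, b_scale_layout, MutableAnyOrigin],
    a_offsets: LayoutTensor[DType.uint32, a_offsets_layout, MutableAnyOrigin],
    expert_ids: LayoutTensor[DType.int32, expert_ids_layout, MutableAnyOrigin],
    max_num_tokens_per_expert: Int,
    num_active_experts: Int,
    ctx: DeviceContext,
) raises:
    # Only the non-inferred parameters are bound here; the values shown
    # are the signature's defaults, written out for illustration.
    naive_blockwise_scaled_fp8_grouped_matmul[
        BLOCK_DIM_N=32,
        BLOCK_DIM_M=16,
        transpose_b=True,
    ](
        c,
        a,
        b,
        a_scales,
        b_scales,
        a_offsets,
        expert_ids,
        max_num_tokens_per_expert,
        num_active_experts,
        ctx,
    )
```

Leaving `scales_granularity_mnk` and `elementwise_lambda_fn` at their `None` defaults skips custom scale granularity and the elementwise epilogue; pass them explicitly only if your use case needs them.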
