
Mojo function

tma_wgmma_warp_specialized_gemm_kernel_persistent

tma_wgmma_warp_specialized_gemm_kernel_persistent[
    a_type: DType,
    b_type: DType,
    c_type: DType,
    a_layout: Layout,
    b_layout: Layout,
    a_tile_layout: Layout,
    b_tile_layout: Layout,
    c_layout: Layout,
    block_tile_shape: Index[3],
    wgmma_shape: Index[3],
    a_desc_layout: Layout,
    b_desc_layout: Layout,
    c_desc_layout: Layout,
    c_tma_layout: Layout,
    c_smem_layout: Layout,
    cluster_shape: StaticTuple[SIMD[int32, 1], 3],
    grid_shape: Index[2],
    a_swizzle: TensorMapSwizzle = TensorMapSwizzle(__init__[__mlir_type.!pop.int_literal](3)),
    b_swizzle: TensorMapSwizzle = TensorMapSwizzle(__init__[__mlir_type.!pop.int_literal](3)),
    c_swizzle: TensorMapSwizzle = TensorMapSwizzle(__init__[__mlir_type.!pop.int_literal](0)),
    transpose_b: Bool = True,
    num_threads: Int = 128,
    pipeline_stages: Int = 7,
    partitioned_multicast: Bool = False,
    use_tma_store: Bool = False,
    promotion_frequency: Int = 1,
    pdl_level: PDLLevel = PDLLevel(),
    elementwise_lambda_fn: OptionalReg[fn[DType, Int, Int](Index[2], SIMD[$0, $1]) capturing -> None] = OptionalReg[fn[DType, Int, Int](Index[2], SIMD[$0, $1]) capturing -> None]({:i1 0, 1})
](
    a_tma_op: TMATensorTile[a_type, a_tile_layout, a_desc_layout],
    b_tma_op: TMATensorTile[b_type, b_tile_layout, b_desc_layout],
    c_tma_op: TMATensorTile[c_type, c_tma_layout, c_desc_layout],
    c: LayoutTensor[c_type, c_layout, MutableAnyOrigin],
    problem_shape: Index[3]
)
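The kernel's tiling is fixed at compile time through parameters such as block_tile_shape (the M/N/K tile computed by each thread block) and wgmma_shape (the shape of a single wgmma instruction). The sketch below is illustrative only: the tile values are assumptions, not taken from this page, and it uses the standard-library Index helper to build the 3-element index lists the signature expects.

    from utils import Index

    # Hypothetical tile configuration for a warp-specialized WGMMA GEMM.
    # block_tile_shape = (BM, BN, BK) tile processed per thread block (assumed values).
    alias block_tile_shape = Index(128, 128, 64)
    # wgmma_shape = (M, N, K) of one wgmma MMA instruction (assumed values).
    alias wgmma_shape = Index(64, 128, 16)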
