Mojo struct
PRegisterBuffer
struct PRegisterBuffer[
    accum_type_: DType,
    dtype: DType,
    BM: Int,
    BN: Int,
    BK: Int,
    WM: Int,
    WN: Int,
    num_m_mmas: Int,
    num_n_mmas: Int,
    output_frag_size: Int,
    shared_memory_backed: Bool,
    mma_shape: IndexList[3],
    k_group_size: Int,
    tr_load_enabled: Bool = False,
]
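The parameter list is dense, so here is a hypothetical concrete instantiation for orientation. Every value below is an illustrative assumption (a bf16-operand / f32-accumulator configuration with a 16x8x16 MMA shape), not a configuration prescribed by the library, and the import for PRegisterBuffer itself is omitted because its module path is not part of this page.

```mojo
from utils.index import Index

# Illustrative parameterization only; all numeric values are assumptions.
alias ExamplePBuffer = PRegisterBuffer[
    DType.float32,      # accum_type_: accumulator element type
    DType.bfloat16,     # dtype: operand / shared-memory element type
    128, 64, 32,        # BM, BN, BK: block tile shape
    64, 32,             # WM, WN: warp tile shape
    4, 4,               # num_m_mmas, num_n_mmas: MMA fragments per warp tile
    4,                  # output_frag_size: accumulator elements per fragment
    True,               # shared_memory_backed
    Index(16, 8, 16),   # mma_shape: IndexList[3]
    1,                  # k_group_size
]
```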
Fields
- reg_tile (LayoutTensor[accum_type_, Layout.row_major((num_n_mmas * num_m_mmas), output_frag_size), MutableAnyOrigin, address_space=AddressSpace(5)]): The per-thread register tile holding the accumulator fragments.
- shared_memory_tile (LayoutTensor[dtype, blocked_product(Layout.row_major(BM, BK), Layout.row_major(1, (BN // BK)), False), MutableAnyOrigin, address_space=AddressSpace(3)]): The shared-memory tile that backs the buffer.
Implemented traits
AnyType, RegisterBuffer, RegisterMMABuffer, UnknownDestructibility
Aliases
__del__is_trivial
alias __del__is_trivial = True
mma_dtype
alias mma_dtype = dtype
mma_tile_layout
alias mma_tile_layout = Layout.row_major(num_m_mmas, simd_width_of[dtype]())
MMATileType
alias MMATileType = LayoutTensor[dtype, Layout.row_major(num_m_mmas, simd_width_of[dtype]()), MutableAnyOrigin, address_space=AddressSpace(5)]
OutputTileType
alias OutputTileType = LayoutTensor[dtype, Layout.row_major(num_m_mmas, output_frag_size), MutableAnyOrigin, address_space=AddressSpace(5)]
reg_dtype
alias reg_dtype = accum_type_
reg_tile_layout
alias reg_tile_layout = Layout.row_major((num_n_mmas * num_m_mmas), output_frag_size)
RegisterTileType
alias RegisterTileType = LayoutTensor[accum_type_, Layout.row_major((num_n_mmas * num_m_mmas), output_frag_size), MutableAnyOrigin, address_space=AddressSpace(5)]
shared_memory_layout
alias shared_memory_layout = blocked_product(Layout.row_major(BM, BK), Layout.row_major(1, (BN // BK)), False)
SharedMemoryTileType
alias SharedMemoryTileType = LayoutTensor[dtype, blocked_product(Layout.row_major(BM, BK), Layout.row_major(1, (BN // BK)), False), MutableAnyOrigin, address_space=AddressSpace(3)]
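For intuition, shared_memory_layout is a blocked product: a row-major BM x BK base block repeated (BN // BK) times along the column dimension, so the BM x BN shared tile is stored block by block, which is what get_shared_memory_tile indexes into. A minimal sketch with the illustrative sizes from above, assuming Layout and blocked_product are importable from the layout package (import paths are assumptions):

```mojo
from layout import Layout
from layout.layout import blocked_product  # assumed import path

# 128 x 32 base block (BM x BK), repeated 64 // 32 = 2 times along columns,
# mirroring the blocked_product call in the aliases above.
alias example_smem_layout = blocked_product(
    Layout.row_major(128, 32),
    Layout.row_major(1, 64 // 32),
    False,
)
```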
Methods
__init__
__init__(out self, shared_ptr: UnsafePointer[Scalar[dtype], address_space=AddressSpace(3), mut=mut, origin=origin])
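A minimal construction sketch inside a GPU kernel, assuming the shared-memory backing store is obtained with the standard-library stack_allocation helper and reusing the illustrative ExamplePBuffer parameters from above; real kernels may size and allocate shared memory differently.

```mojo
from memory import stack_allocation
from gpu.memory import AddressSpace

fn example_kernel_fragment():
    # Shared-memory backing store; the BM * BN element count is an assumption
    # matching the illustrative 128 x 64 block tile above.
    var smem = stack_allocation[
        128 * 64,
        DType.bfloat16,
        address_space = AddressSpace.SHARED,
    ]()
    var buf = ExamplePBuffer(smem)
```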
get_mma_tile_reg
get_mma_tile_reg[tile_idx: Int, k_idx: Int](self) -> LayoutTensor[dtype, Layout.row_major(num_m_mmas, simd_width_of[dtype]()), MutableAnyOrigin, address_space=AddressSpace(5)]
Returns: The requested MMA fragment, read from the register tile.
get_mma_tile_shared
get_mma_tile_shared[tile_idx: Int, k_idx: Int](self) -> LayoutTensor[dtype, Layout.row_major(num_m_mmas, simd_width_of[dtype]()), MutableAnyOrigin, address_space=AddressSpace(5)]
Returns: The requested MMA fragment, loaded from the shared-memory tile into a register-resident tensor.
get_mma_tile
get_mma_tile[tile_idx: Int, k_idx: Int](self) -> LayoutTensor[dtype, Layout.row_major(num_m_mmas, simd_width_of[dtype]()), MutableAnyOrigin, address_space=AddressSpace(5)]
Returns: The requested MMA fragment for the given tile and k-group indices.
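The three accessors return the same register-shaped fragment view: get_mma_tile_reg sources it from the register tile, get_mma_tile_shared pulls it from the shared-memory tile, and get_mma_tile presumably dispatches between the two (for example based on shared_memory_backed). A hedged sketch, continuing the construction example above; the compile-time indices are illustrative and would come from the kernel's K loop:

```mojo
    # Fetch the fragment for MMA tile 0, k-group 0 (illustrative indices).
    var frag = buf.get_mma_tile[0, 0]()
    # ... feed `frag` to the warp-level MMA op for this step ...
```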
get_dtype
vectorize
vectorize(self) -> LayoutTensor[accum_type_, coalesce(LayoutTensor._compute_tile_layout[True, accum_type_, Layout.row_major((num_n_mmas * num_m_mmas), output_frag_size), MutableAnyOrigin, AddressSpace(5), Layout(IntTuple(1), IntTuple(1)), _get_layout_type(Layout.row_major((num_n_mmas * num_m_mmas), output_frag_size), AddressSpace(5)), _get_index_type(Layout.row_major((num_n_mmas * num_m_mmas), output_frag_size), AddressSpace(5)), False, align_of[accum_type_](), 1, output_frag_size]()[1], True), MutableAnyOrigin, address_space=AddressSpace(5), element_layout=LayoutTensor._divide_tiles[True, accum_type_, Layout.row_major((num_n_mmas * num_m_mmas), output_frag_size), MutableAnyOrigin, AddressSpace(5), Layout(IntTuple(1), IntTuple(1)), _get_layout_type(Layout.row_major((num_n_mmas * num_m_mmas), output_frag_size), AddressSpace(5)), _get_index_type(Layout.row_major((num_n_mmas * num_m_mmas), output_frag_size), AddressSpace(5)), False, align_of[accum_type_](), 1, output_frag_size]()[0], layout_int_type=_get_layout_type(Layout.row_major((num_n_mmas * num_m_mmas), output_frag_size), AddressSpace(5)), linear_idx_type=_get_index_type(Layout.row_major((num_n_mmas * num_m_mmas), output_frag_size), AddressSpace(5))]
Returns: A vectorized view of the register tile whose elements are output_frag_size-wide fragments.
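The return type above is the mechanical expansion of LayoutTensor.vectorize applied to the register tile; the practical reading is that vectorize() yields a view whose elements are output_frag_size-wide vectors, one per MMA fragment, which is convenient for elementwise epilogues. A hedged sketch, continuing the earlier example:

```mojo
    # View the accumulator registers as one output_frag_size-wide vector per MMA.
    var acc = buf.vectorize()

    @parameter
    for i in range(4 * 4):  # num_m_mmas * num_n_mmas for the illustrative parameters
        var fragment = acc[i, 0]  # one whole output fragment as a SIMD value
        # ... apply an elementwise epilogue to `fragment` here ...
```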
zero
zero(self)
get_reg_tile
get_reg_tile(self) -> LayoutTensor[accum_type_, Layout.row_major((num_n_mmas * num_m_mmas), output_frag_size), MutableAnyOrigin, address_space=AddressSpace(5)]
Returns: The full accumulator register tile.
get_shared_memory_tile
get_shared_memory_tile(self, tile_idx: Int) -> LayoutTensor[dtype, LayoutTensor._compute_tile_layout[True, dtype, blocked_product(Layout.row_major(BM, BK), Layout.row_major(1, (BN // BK)), False), MutableAnyOrigin, AddressSpace(3), Layout(IntTuple(1), IntTuple(1)), _get_layout_type(blocked_product(Layout.row_major(BM, BK), Layout.row_major(1, (BN // BK)), False), AddressSpace(3)), _get_index_type(blocked_product(Layout.row_major(BM, BK), Layout.row_major(1, (BN // BK)), False), AddressSpace(3)), False, align_of[dtype](), BM, BK]()[0], MutableAnyOrigin, address_space=AddressSpace(3), layout_int_type=_get_layout_type(blocked_product(Layout.row_major(BM, BK), Layout.row_major(1, (BN // BK)), False), AddressSpace(3)), linear_idx_type=_get_index_type(blocked_product(Layout.row_major(BM, BK), Layout.row_major(1, (BN // BK)), False), AddressSpace(3)), masked=_tile_is_masked[blocked_product(Layout.row_major(BM, BK), Layout.row_major(1, (BN // BK)), False), BM, BK]()]
Returns: The tile_idx-th BM x BK block of the shared-memory tile.
copy_to_shared
copy_to_shared(self)
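Putting the pieces together, a hedged sketch of the apparent lifecycle, continuing the earlier example: clear the accumulators, run the K loop that consumes MMA fragments, then spill the results to shared memory and hand a BM x BK block to the epilogue. Only methods documented on this page are used; the surrounding loop structure is an assumption.

```mojo
    buf.zero()              # clear the accumulator register tile

    # ... K loop: load operands, read buf.get_mma_tile[...]() fragments,
    # and issue the warp-level MMA ops ...

    buf.copy_to_shared()    # spill the register tile into shared memory
    var out_block = buf.get_shared_memory_tile(0)  # first BM x BK block, e.g. for the epilogue
```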