Skip to main content

Mojo struct

KVBuffer

struct KVBuffer[kv_t: MHAOperand, //, mma_shape: IndexList[3], k_group_size: Int, swizzle: Optional[Swizzle], BN: Int, WN: Int, BK: Int, num_threads: Int, depth: Int, kv_num_heads: Int, transpose: Bool]

Fields

  • mma_tile (KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].MMATileType): Register-resident tile (the MMATileType is a LayoutTensor in AddressSpace.LOCAL) holding the operand data consumed by MMA operations.
  • smem_ptr (UnsafePointer[Scalar[kv_t.dtype], MutAnyOrigin, address_space=AddressSpace.SHARED]): Pointer to this buffer's shared-memory storage of Scalar[kv_t.dtype] elements.
  • kv_cache_iter (KVCacheIterator[kv_t, BN, kv_num_heads, depth]): Iterator over the KV cache, parameterized on the operand type kv_t and the tile parameters BN, kv_num_heads, and depth.
  • lds_base_ptrs (InlineArray[UInt32, 2]): Two UInt32 base addresses — presumably one per buffer stage for double-buffered shared-memory loads; confirm against the implementation.
  • warp_id (UInt32): Warp identifier supplied to __init__ at construction time.

Implemented traits

AnyType, ImplicitlyDestructible

comptime members

base_layout

comptime base_layout = Layout.row_major(VariadicList(BN, BK))

MMA_K

comptime MMA_K = mma_shape[2] — the K extent of the MMA instruction shape. (The generated rendering mma_shape.__getitem__[Int](2) is the explicit form of this subscript.)

MMA_N

comptime MMA_N = mma_shape[1] — the N extent of the MMA instruction shape. (The generated rendering mma_shape.__getitem__[Int](1) is the explicit form of this subscript.)

MMATileType

comptime MMATileType = LayoutTensor[kv_t.dtype, Layout.row_major(VariadicList(((KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].num_mmas * KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].num_k_mmas2) * KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].num_k_tiles), KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].simd_width)), MutAnyOrigin, address_space=AddressSpace.LOCAL]

num_k_mmas2

comptime num_k_mmas2 = ceildiv(BK, (KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].MMA_K * k_group_size))

num_k_tiles

comptime num_k_tiles = ceildiv(depth if transpose else WN, BK)

num_mmas

comptime num_mmas = ceildiv(WN if transpose else depth, KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].MMA_N)

num_repeats

comptime num_repeats = (depth // BK)

SharedTileType

comptime SharedTileType = LayoutTensor[kv_t.dtype, KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].smem_layout, MutAnyOrigin, address_space=AddressSpace.SHARED]

SharedWarpTileType

comptime SharedWarpTileType = LayoutTensor[kv_t.dtype, LayoutTensor._compute_tile_layout[KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].wtile_dim0, KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].wtile_dim1]()[0], MutAnyOrigin, address_space=AddressSpace.SHARED, layout_int_type=_get_layout_type(KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].smem_layout, AddressSpace.SHARED), linear_idx_type=_get_index_type(KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].smem_layout, AddressSpace.SHARED), masked=_tile_is_masked[KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].smem_layout, KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].wtile_dim0, KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].wtile_dim1]()]

simd_width

comptime simd_width = simd_width_of[kv_t.dtype]()

smem_layout

comptime smem_layout = blocked_product(KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].base_layout, KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].tiler_layout, False)

smem_stage_size

comptime smem_stage_size = KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].smem_layout.size()

tiler_layout

comptime tiler_layout = Layout.row_major(VariadicList(1, KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].num_repeats))

wtile_dim0

comptime wtile_dim0 = WN

wtile_dim1

comptime wtile_dim1 = BK

Methods

__init__

__init__(out self, k_cache: kv_t, batch_idx: UInt, head_idx: UInt, shared_ptr: UnsafePointer[Scalar[kv_t.dtype], MutAnyOrigin, address_space=AddressSpace.SHARED], end: UInt, warp_id: UInt32)

load_from_dram

load_from_dram[buffer_idx: Int](mut self)

get_mma_tile

get_mma_tile[k_mma_tile_idx: Int, bk_tile_idx: Int](self) -> LayoutTensor[kv_t.dtype, LayoutTensor._compute_tile_layout[tile_size=(LayoutTensor._compute_tile_layout[tile_size=(Layout.row_major(VariadicList(((KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].num_mmas * KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].num_k_mmas2) * KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].num_k_tiles), KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].simd_width)).shape[0].value() // KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].num_k_tiles), axis=0]()[0].shape[0].value() // KVBuffer[mma_shape, k_group_size, swizzle, BN, WN, BK, num_threads, depth, kv_num_heads, transpose].num_k_mmas2), axis=0]()[0], MutAnyOrigin, address_space=AddressSpace.LOCAL]

Returns:

LayoutTensor — a tile of element type kv_t.dtype in AddressSpace.LOCAL (registers), selected by the compile-time indices k_mma_tile_idx and bk_tile_idx, with its layout computed from MMATileType via LayoutTensor._compute_tile_layout.

copy_to_shared

copy_to_shared(self)

load_from_shared

load_from_shared(self, buffer: UInt)

load_from_shared[bk_tile: Int](self, buffer: UInt)

Two overloads: both take a runtime buffer index; the second additionally takes a compile-time bk_tile parameter selecting a specific BK tile to load.

Was this page helpful?