Mojo struct
PagedKVCache
@register_passable(trivial)
struct PagedKVCache[dtype_: DType, kv_params_: KVCacheStaticParams, page_size: Int, scale_dtype_: DType = DType.invalid, quantization_granularity: Int = 1]
The PagedKVCache is a wrapper around the KVCache blocks for a given layer. It is used to access the KVCache blocks for PagedAttention.
Note: This struct represents a 4D view of a 6D PagedKVCacheCollection
tensor. The compile-time layout has UNKNOWN_VALUE for stride[0] because
the actual stride depends on num_layers from the parent tensor, which is
only known at runtime. This ensures offset calculations use the correct
runtime strides rather than incorrect compile-time values.
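For illustration, a minimal parameterization sketch for an 8-head, 128-dim bfloat16 cache with 128-token pages. The import path kv_cache.types and the KVCacheStaticParams field names are assumptions inferred from this page and may differ between MAX releases; comptime is used as rendered throughout this page (older toolchains spell it alias).

```mojo
# Assumed import path; adjust to your MAX release.
from kv_cache.types import KVCacheStaticParams, PagedKVCache

# 8 KV heads of head size 128 (field names assumed from the aliases on this page).
comptime my_kv_params = KVCacheStaticParams(num_heads=8, head_size=128)

# bfloat16 values, 128 tokens per page. scale_dtype_ and
# quantization_granularity keep their defaults, so this cache is
# unquantized (quantization_enabled == False).
comptime MyPagedKVCache = PagedKVCache[DType.bfloat16, my_kv_params, 128]
```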
Parameters
- dtype_ (DType): The dtype of the kv-cache.
- kv_params_ (KVCacheStaticParams): The kv-cache static parameters.
- page_size (Int): The size of the page.
- scale_dtype_ (DType): Dtype of the quantization scales (if quantization is enabled).
- quantization_granularity (Int): Block size used for quantization (e.g. 128).
Fields
- blocks (PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].blocks_type): The paged KV cache blocks for this layer.
- cache_lengths (LayoutTensor[DType.uint32, Layout(IntTuple(-1)), ImmutAnyOrigin]): The cache length for each sequence in the batch.
- lookup_table (LayoutTensor[DType.uint32, Layout.row_major[2](), ImmutAnyOrigin]): The per-sequence table used to look up the physical blocks backing each page.
- max_seq_length (UInt32): The maximum sequence length across all batches of the current request.
- max_cache_length (UInt32): The maximum cache length used across all batches of the current request.
- scales (OptionalReg[LayoutTensor[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].scale_dtype, PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].scales_layout, MutAnyOrigin]]): Optional quantization scales (present only when quantization is enabled).
Implemented traits
AnyType,
Copyable,
DevicePassable,
ImplicitlyCopyable,
ImplicitlyDestructible,
KVCacheT,
Movable,
TrivialRegisterType
comptime members
__copyinit__is_trivial
comptime __copyinit__is_trivial = True
__del__is_trivial
comptime __del__is_trivial = True
__moveinit__is_trivial
comptime __moveinit__is_trivial = True
blocks_layout
comptime blocks_layout = Layout(PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].blocks_shape, PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].blocks_strides)
blocks_shape
comptime blocks_shape = IntTuple(-1, page_size, Int.__init__[UInt](PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].kv_params.num_heads), Int.__init__[UInt](PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].kv_params.head_size))
blocks_strides
comptime blocks_strides = IntTuple(-1, (Int.__init__[UInt](PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].kv_params.num_heads) * Int.__init__[UInt](PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].kv_params.head_size)), Int.__init__[UInt](PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].kv_params.head_size), 1)
blocks_type
comptime blocks_type = LayoutTensor[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].dtype, PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].blocks_layout, MutAnyOrigin]
device_type
comptime device_type = PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity]
dtype
comptime dtype = dtype_
head_dim_granularity
comptime head_dim_granularity = ceildiv(Int.__init__[UInt](PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].kv_params.head_size), quantization_granularity)
kv_params
comptime kv_params = kv_params_
page_size_
comptime page_size_ = page_size
quantization_enabled
comptime quantization_enabled = (scale_dtype_ != DType.invalid)
scale_dtype
comptime scale_dtype = scale_dtype_
scales_block_type
comptime scales_block_type = LayoutTensor[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].scale_dtype, PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].scales_layout, MutAnyOrigin]
scales_layout
comptime scales_layout = Layout.row_major(PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].scales_shape)
scales_shape
comptime scales_shape = IntTuple(-1, page_size, Int.__init__[UInt](PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].kv_params.num_heads), PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].head_dim_granularity)
Methods
__init__
__init__(blocks: LayoutTensor[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].dtype, PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].blocks_layout, MutAnyOrigin], cache_lengths: LayoutTensor[DType.uint32, Layout(IntTuple(-1)), ImmutAnyOrigin], lookup_table: LayoutTensor[DType.uint32, Layout.row_major[2](), ImmutAnyOrigin], max_seq_length: UInt32, max_cache_length: UInt32, scales: OptionalReg[LayoutTensor[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].scale_dtype, PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].scales_layout, MutAnyOrigin]] = None) -> Self
get_type_name
static get_type_name() -> String
Returns:
String
max_tile_size
cache_lengths_nd
cache_lengths_nd(self) -> LayoutTensor[DType.uint32, Layout(IntTuple(-1)), ImmutAnyOrigin]
Returns:
LayoutTensor
cache_length
cache_length(self, batch_idx: Int) -> Int
Returns the length of the cache for a given batch index.
Returns:
Int
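A minimal usage sketch, assuming a cache for one layer has already been obtained (for example from a PagedKVCacheCollection) and that the number of sequences in the batch is known to the caller; the import path is an assumption as noted above.

```mojo
from kv_cache.types import KVCacheStaticParams, PagedKVCache  # assumed path


fn print_cache_lengths[
    dtype: DType,
    kv_params: KVCacheStaticParams,
    page_size: Int,
](cache: PagedKVCache[dtype, kv_params, page_size], batch_size: Int):
    # Report how many tokens are already cached for each sequence.
    for batch_idx in range(batch_size):
        print("sequence", batch_idx, "cached tokens:", cache.cache_length(batch_idx))
```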
row_idx
row_idx(self, batch_idx: UInt32, tok_idx: UInt32) -> UInt32
Returns the row idx when viewing the memory as a matrix.
Returns:
UInt32
create_tma_tile
create_tma_tile[swizzle_mode: TensorMapSwizzle, *, BN: Int, BK: Int = padded_depth[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].dtype, swizzle_mode, Int.__init__[UInt](PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].kv_params.head_size)]()](self, ctx: DeviceContext) -> TMATensorTile[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].dtype, _split_last_layout[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].dtype](IndexList[3, DType.int64](BN, 1, BK, Tuple[]()), swizzle_mode, True), _ragged_desc_layout[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].dtype](IndexList[3, DType.int64](BN, 1, BK, Tuple[]()), swizzle_mode)]
Creates a TMA tile for this KV cache.
Returns:
TMATensorTile
create_ragged_tma_tile
create_ragged_tma_tile[swizzle_mode: TensorMapSwizzle, *, BN: Int, BK: Int = padded_depth[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].dtype, swizzle_mode, Int.__init__[UInt](PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].kv_params.head_size)]()](self, ctx: DeviceContext, out tma: RaggedTMA3DTile[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].dtype, swizzle_mode, BN, BK])
Returns:
RaggedTMA3DTile
load
load[width: Int, output_dtype: DType = PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].dtype](self, bs: Int, head_idx: Int, tok_idx: Int, head_dim_idx: Int) -> SIMD[output_dtype, width]
Loads an element from the given index.
Returns:
SIMD
store
store(self, bs: Int, head_idx: Int, tok_idx: Int, head_dim_idx: Int, val: SIMD[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].dtype, size])
Stores an element at the given index.
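A hedged sketch that uses load and store together to copy one head's values for a token to another token position in the same cache. The SIMD width of 4 and the assumption that head_size is a multiple of that width are illustrative choices, not requirements stated on this page.

```mojo
from kv_cache.types import KVCacheStaticParams, PagedKVCache  # assumed path


fn copy_token[
    dtype: DType,
    kv_params: KVCacheStaticParams,
    page_size: Int,
](
    cache: PagedKVCache[dtype, kv_params, page_size],
    bs: Int,
    head_idx: Int,
    src_tok: Int,
    dst_tok: Int,
):
    comptime width = 4  # illustrative; assumes head_size % width == 0
    comptime head_size = Int(kv_params.head_size)

    # Walk the head dimension in SIMD-width steps, reading from the source
    # token position and writing the same values at the destination token.
    for d in range(0, head_size, width):
        var vec = cache.load[width](bs, head_idx, src_tok, d)
        cache.store(bs, head_idx, dst_tok, d, vec)
```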
load_scale
load_scale[width: Int](self, bs: Int, head_idx: Int, tok_idx: Int, head_dim_idx: Int) -> SIMD[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].scale_dtype, width]
Loads a quantization scale from the given index.
Returns:
SIMD
store_scale
store_scale(self, bs: Int, head_idx: Int, tok_idx: Int, head_dim_idx: Int, scales: SIMD[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].scale_dtype, size])
Stores the quantization scales at the given index.
load_quantized
load_quantized[width: Int](self, bs: Int, head_idx: Int, tok_idx: Int, head_dim_idx: Int) -> SIMD[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].dtype, width]
Loads a quantized element from the given index.
Returns:
SIMD
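For quantized caches, load_quantized and load_scale can be combined. The sketch below assumes a plain value-times-scale dequantization and that head_dim_idx is interpreted identically by both methods; neither assumption is stated on this page, so treat this as illustrative only.

```mojo
from kv_cache.types import KVCacheStaticParams, PagedKVCache  # assumed path


fn dequantize_element[
    dtype: DType,
    kv_params: KVCacheStaticParams,
    page_size: Int,
    scale_dtype: DType,
    granularity: Int,
](
    cache: PagedKVCache[dtype, kv_params, page_size, scale_dtype, granularity],
    bs: Int,
    head_idx: Int,
    tok_idx: Int,
    head_dim_idx: Int,
) -> Float32:
    # Only meaningful when the cache was parameterized with a valid
    # scale_dtype (quantization_enabled == True).
    var q = cache.load_quantized[1](bs, head_idx, tok_idx, head_dim_idx)
    var s = cache.load_scale[1](bs, head_idx, tok_idx, head_dim_idx)
    # Assumed dequantization scheme: quantized value * per-group scale,
    # accumulated in float32.
    return q.cast[DType.float32]() * s.cast[DType.float32]()
```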
empty_cache
empty_cache(self) -> Bool
Returns true if the cache lengths for all requests are 0, false otherwise.
Returns:
Bool
max_prompt_length
max_prompt_length(self) -> UInt32
Returns the maximum sequence length across all batches of the current request.
Returns:
UInt32
max_context_length
max_context_length(self) -> UInt32
Returns the maximum cache length used across all batches of the current request.
Returns:
UInt32
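A small sketch that reads the batch-wide bookkeeping exposed by empty_cache, max_prompt_length, and max_context_length; the import path is an assumption as noted above.

```mojo
from kv_cache.types import KVCacheStaticParams, PagedKVCache  # assumed path


fn print_batch_stats[
    dtype: DType,
    kv_params: KVCacheStaticParams,
    page_size: Int,
](cache: PagedKVCache[dtype, kv_params, page_size]):
    # All bookkeeping below is per batch, not per sequence.
    if cache.empty_cache():
        print("no tokens cached for any request in the batch")
    print("max prompt length:", cache.max_prompt_length())
    print("max context length:", cache.max_context_length())
```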
block_paged_ptr
block_paged_ptr[tile_size: Int](self, batch_idx: Int, start_tok_idx: Int, head_idx: Int, head_dim_idx: Int = 0) -> UnsafePointer[Scalar[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].dtype], MutAnyOrigin]
Returns a pointer to the KV block at the requested indices.
Returns:
UnsafePointer
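A hedged sketch of handing a contiguous KV tile to lower-level code via block_paged_ptr. The tile size of 64 is an illustrative choice and is assumed (not documented here) to divide the page size evenly.

```mojo
from kv_cache.types import KVCacheStaticParams, PagedKVCache  # assumed path


fn read_first_tile_element[
    dtype: DType,
    kv_params: KVCacheStaticParams,
    page_size: Int,
](
    cache: PagedKVCache[dtype, kv_params, page_size],
    batch_idx: Int,
    start_tok_idx: Int,
    head_idx: Int,
) -> Scalar[dtype]:
    # Raw pointer to the start of a 64-token tile for this head, suitable
    # for passing to a kernel that consumes contiguous KV data.
    # head_dim_idx is left at its default of 0.
    var ptr = cache.block_paged_ptr[64](batch_idx, start_tok_idx, head_idx)
    return ptr[0]
```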
scales_block_paged_ptr
scales_block_paged_ptr(self, batch_idx: Int, start_tok_idx: Int, head_idx: Int, head_dim_idx: Int = 0) -> UnsafePointer[Scalar[PagedKVCache[dtype_, kv_params_, page_size, scale_dtype_, quantization_granularity].scale_dtype], MutAnyOrigin]
Returns a pointer to the scales block at the requested indices.
Returns:
UnsafePointer