Skip to main content

Mojo struct

SM100TensorAccumulatorSS

@register_passable(trivial) struct SM100TensorAccumulatorSS[operand_type: DType, accum_type: DType, MMA_M: Int, MMA_N: Int, BM: Int, BN: Int, BK: Int, compute_BK: Int, num_softmax_threads: Int, swizzle_a: TensorMapSwizzle = TensorMapSwizzle.SWIZZLE_128B, swizzle_b: TensorMapSwizzle = TensorMapSwizzle.SWIZZLE_128B, *, transpose_b: Bool = True, cta_group: Int = 1, pipeline_stages: Int = 1]

Fields

  • mbar (UnsafePointer[SharedMemBarrier, address_space=AddressSpace.SHARED]): Pointer to shared-memory barriers used to synchronize MMA producer/consumer stages.
  • pipeline (PipelineState[pipeline_stages]): Tracks the current stage of the multi-stage MMA pipeline.

Implemented traits

AnyType, Copyable, ImplicitlyCopyable, Movable, UnknownDestructibility

Aliases

__copyinit__is_trivial

alias __copyinit__is_trivial = True

__del__is_trivial

alias __del__is_trivial = True

__moveinit__is_trivial

alias __moveinit__is_trivial = True

a_offset

alias a_offset = MMAOperandOffsetFn[SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].operand_t, BM, BK, swizzle_a, True, MMA_M, 16]()

a_t

alias a_t = MMASmemDescriptor

ab_t

alias ab_t = UMMADescriptorSS[SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].operand_t]

accum_t

alias accum_t = accum_type

b_offset

alias b_offset = MMAOperandOffsetFn[SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].operand_t, BN, BK, swizzle_b, transpose_b, MMA_N, 16]()

b_t

alias b_t = MMASmemDescriptor

c_t

alias c_t = TMemAccumulator[SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].accum_t, (BM // SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].num_m_blocks_per_warp), MMA_N, SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].num_m_blocks_per_warp, SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].num_n_mmas, num_softmax_threads]

idesc

alias idesc = UMMAInsDescriptor.create[UMMAKind.KIND_F16, SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].accum_t, SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].operand_t, SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].operand_t, Index[dtype=DType.uint32](MMA_M, MMA_N), transpose_b=transpose_b]()

MMA_K

alias MMA_K = 16

num_k_mmas

alias num_k_mmas = (compute_BK // 16)

num_m_blocks_per_warp

alias num_m_blocks_per_warp = ((2 * BM) // num_softmax_threads)

num_m_mmas

alias num_m_mmas = (BM // MMA_M)

num_n_mmas

alias num_n_mmas = (BN // MMA_N)

operand_t

alias operand_t = operand_type

smem_ptr_t

alias smem_ptr_t = UnsafePointer[Scalar[SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].operand_t], address_space=AddressSpace.SHARED]

Methods

__init__

__init__(smem: UnsafePointer[SharedMemBarrier, address_space=AddressSpace.SHARED]) -> Self

check_constraints

static check_constraints()

init

init(self)

mma_descriptors

static mma_descriptors[dtype_a: DType, dtype_b: DType](p_a: UnsafePointer[Scalar[dtype_a], address_space=AddressSpace.SHARED], p_b: UnsafePointer[Scalar[dtype_b], address_space=AddressSpace.SHARED]) -> UMMADescriptorSS[SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].operand_t]

Returns:

UMMADescriptorSS

mma

mma(mut self, a: MMASmemDescriptor, b: MMASmemDescriptor, c_base: TMemAccumulator[SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].accum_t, (BM // SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].num_m_blocks_per_warp), MMA_N, SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].num_m_blocks_per_warp, SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].num_n_mmas, num_softmax_threads], scale_c: UInt32)

wait_for_tmem

wait_for_tmem(self)

Wait for the accumulator tmem to finish being read.

wait_for_mma

wait_for_mma(self, c_base: TMemAccumulator[SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].accum_t, (BM // SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].num_m_blocks_per_warp), MMA_N, SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].num_m_blocks_per_warp, SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].num_n_mmas, num_softmax_threads]) -> TMemAccumulator[SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].accum_t, (BM // SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].num_m_blocks_per_warp), MMA_N, SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].num_m_blocks_per_warp, SM100TensorAccumulatorSS[operand_type, accum_type, MMA_M, MMA_N, BM, BN, BK, compute_BK, num_softmax_threads, swizzle_a, swizzle_b, transpose_b=transpose_b, cta_group=cta_group, pipeline_stages=pipeline_stages].num_n_mmas, num_softmax_threads]

Wait for the in-flight MMA operation to complete so the accumulator tmem is safe to read.

Returns:

TMemAccumulator

tmem_arrive_init

tmem_arrive_init(self)

tmem_arrive

tmem_arrive(mut self)

Indicate that the accumulator is ready to be updated.

Was this page helpful?