Skip to main content

Mojo struct

SM100TensorAccumulatorSS

@register_passable(trivial) struct SM100TensorAccumulatorSS[operand_type: DType, accum_type: DType, MMA_M: Int, MMA_N: Int, BM: Int, BN: Int, BK: Int, compute_BK: Int, num_softmax_threads: Int, swizzle_a: TensorMapSwizzle = 3, swizzle_b: TensorMapSwizzle = 3, *, transpose_b: Bool = True, cta_group: Int = 1, pipeline_stages: Int = 1]

A pipelined accumulator wrapper for SM100 tensor-core MMA operations in which both the A and B operands are read from shared memory (the "SS" suffix; see the `MMASmemDescriptor` operand aliases and the shared-memory `smem_ptr_t` pointer type), with results accumulated in tensor memory (`TMemAccumulator`). Producer/consumer synchronization appears to be coordinated through the shared-memory barrier field (`mbar`) and a `PipelineState` with `pipeline_stages` stages; `num_softmax_threads` sizes the consumer-side partitioning of the accumulator tile.

Fields

  • mbar (UnsafePointer[SharedMemBarrier, address_space=AddressSpace(3)]):
  • pipeline (PipelineState[pipeline_stages]):

Implemented traits

AnyType, Copyable, ImplicitlyCopyable, Movable, UnknownDestructibility

Aliases

__copyinit__is_trivial

alias __copyinit__is_trivial = PipelineState[pipeline_stages].__copyinit__is_trivial if UnsafePointer[SharedMemBarrier, address_space=AddressSpace(3)].__copyinit__is_trivial else UnsafePointer[SharedMemBarrier, address_space=AddressSpace(3)].__copyinit__is_trivial

__del__is_trivial

alias __del__is_trivial = PipelineState[pipeline_stages].__del__is_trivial if UnsafePointer[SharedMemBarrier, address_space=AddressSpace(3)].__del__is_trivial else UnsafePointer[SharedMemBarrier, address_space=AddressSpace(3)].__del__is_trivial

__moveinit__is_trivial

alias __moveinit__is_trivial = PipelineState[pipeline_stages].__moveinit__is_trivial if UnsafePointer[SharedMemBarrier, address_space=AddressSpace(3)].__moveinit__is_trivial else UnsafePointer[SharedMemBarrier, address_space=AddressSpace(3)].__moveinit__is_trivial

a_offset

alias a_offset = MMAOperandOffsetFn[operand_type, BM, BK, swizzle_a, True, MMA_M, 16]()

a_t

alias a_t = MMASmemDescriptor

ab_t

alias ab_t = UMMADescriptorSS[operand_type]

accum_t

alias accum_t = accum_type

b_offset

alias b_offset = MMAOperandOffsetFn[operand_type, BN, BK, swizzle_b, transpose_b, MMA_N, 16]()

b_t

alias b_t = MMASmemDescriptor

c_t

alias c_t = TMemAccumulator[accum_type, (BM // ((BM * 2) // num_softmax_threads)), MMA_N, ((BM * 2) // num_softmax_threads), (BN // MMA_N), num_softmax_threads]

idesc

alias idesc = UMMAInsDescriptor.create[UMMAKind(2), accum_type, operand_type, operand_type, Index[dtype=DType.uint32](MMA_M, MMA_N), transpose_b=transpose_b]()

MMA_K

alias MMA_K = 16

num_k_mmas

alias num_k_mmas = (compute_BK // 16)

num_m_blocks_per_warp

alias num_m_blocks_per_warp = ((BM * 2) // num_softmax_threads)

num_m_mmas

alias num_m_mmas = (BM // MMA_M)

num_n_mmas

alias num_n_mmas = (BN // MMA_N)

operand_t

alias operand_t = operand_type

smem_ptr_t

alias smem_ptr_t = UnsafePointer[Scalar[operand_type], address_space=AddressSpace(3)]

Methods

__init__

__init__(smem: UnsafePointer[SharedMemBarrier, address_space=AddressSpace(3)]) -> Self

check_constraints

static check_constraints()

init

init(self)

mma_descriptors

static mma_descriptors[dtype_a: DType, dtype_b: DType](p_a: UnsafePointer[Scalar[dtype_a], address_space=AddressSpace(3)], p_b: UnsafePointer[Scalar[dtype_b], address_space=AddressSpace(3)]) -> UMMADescriptorSS[operand_type]

Returns:

A `UMMADescriptorSS[operand_type]` wrapping shared-memory MMA descriptors constructed from the given A and B operand pointers (`p_a`, `p_b`).

mma

mma(mut self, a: MMASmemDescriptor, b: MMASmemDescriptor, c_base: TMemAccumulator[accum_type, (BM // ((BM * 2) // num_softmax_threads)), MMA_N, ((BM * 2) // num_softmax_threads), (BN // MMA_N), num_softmax_threads], scale_c: UInt32)

wait_for_tmem

wait_for_tmem(self)

Wait for the accumulator tmem to finish being read.

wait_for_mma

wait_for_mma(self, c_base: TMemAccumulator[accum_type, (BM // ((BM * 2) // num_softmax_threads)), MMA_N, ((BM * 2) // num_softmax_threads), (BN // MMA_N), num_softmax_threads]) -> TMemAccumulator[accum_type, (BM // ((BM * 2) // num_softmax_threads)), MMA_N, ((BM * 2) // num_softmax_threads), (BN // MMA_N), num_softmax_threads]

Wait for the pending MMA operations to complete so that the accumulator results can be read. (Note: the original page duplicated `wait_for_tmem`'s description here — "Wait for the accumulator tmem to finish being read" — which contradicts the method name; confirm against the implementation.)

Returns:

The `TMemAccumulator` from which the completed MMA results can be read.

tmem_arrive_init

tmem_arrive_init(self)

tmem_arrive

tmem_arrive(mut self)

Indicate that the accumulator is ready to be updated.

Was this page helpful?