Mojo function
batched_matmul
batched_matmul[rank: Int, a_type: DType, b_type: DType, c_type: DType, //, *, transpose_a: Bool, transpose_b: Bool, elementwise_epilogue_fn: OptionalReg[fn[DType, Int, Int, Int](Index[$2], SIMD[$0, $1]) capturing -> None] = None, saturated_vnni: Bool = False, single_thread_blocking_override: Bool = False, target: StringSlice[StaticConstantOrigin] = "cpu"](c_buf: NDBuffer[c_type, rank, origin], a_buf: NDBuffer[a_type, rank, origin], b_buf: NDBuffer[b_type, rank, origin], *, context: DeviceContextPtr = DeviceContextPtr())
batched_matmul[rank: Int, a_type: DType, b_type: DType, c_type: DType, //, *, transpose_b: Bool, elementwise_epilogue_fn: OptionalReg[fn[DType, Int, Int, Int](Index[$2], SIMD[$0, $1]) capturing -> None] = None, saturated_vnni: Bool = False, target: StringSlice[StaticConstantOrigin] = "cpu"](c_buf: NDBuffer[c_type, rank, origin], a_buf: NDBuffer[a_type, rank, origin], b_buf: NDBuffer[b_type, rank, origin], *, context: DeviceContextPtr = DeviceContextPtr())
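Both overloads perform a batched matrix multiplication: for each leading batch index, the corresponding slice of `a_buf` is multiplied with the corresponding slice of `b_buf` and the product is written to `c_buf`, with `transpose_a`/`transpose_b` selecting whether an operand is transposed first. The sketch below is a minimal CPU usage example, not a definitive recipe: the import path `linalg.bmm`, the `NDBuffer` constructor form, and the chosen shapes are assumptions for illustration and may need adjusting to your Mojo/MAX version.

```mojo
from buffer import NDBuffer
from memory import UnsafePointer
from utils.index import Index
from linalg.bmm import batched_matmul  # assumed import path

fn main() raises:
    alias batch = 2
    alias m = 3
    alias k = 4
    alias n = 5

    # Backing storage for A (batch x m x k), B (batch x k x n), C (batch x m x n).
    var a_ptr = UnsafePointer[Float32].alloc(batch * m * k)
    var b_ptr = UnsafePointer[Float32].alloc(batch * k * n)
    var c_ptr = UnsafePointer[Float32].alloc(batch * m * n)

    # Fill A with a ramp and B with ones so the result is easy to check.
    for i in range(batch * m * k):
        a_ptr[i] = Float32(i)
    for i in range(batch * k * n):
        b_ptr[i] = Float32(1)

    # Wrap the raw storage in rank-3 NDBuffers (assumed constructor form).
    var a = NDBuffer[DType.float32, 3](a_ptr, Index(batch, m, k))
    var b = NDBuffer[DType.float32, 3](b_ptr, Index(batch, k, n))
    var c = NDBuffer[DType.float32, 3](c_ptr, Index(batch, m, n))

    # Plain batched C = A @ B on the CPU; neither operand is transposed and
    # the remaining parameters keep their defaults.
    batched_matmul[transpose_a=False, transpose_b=False](c, a, b)

    a_ptr.free()
    b_ptr.free()
    c_ptr.free()
```

The call above matches the first overload; the second overload takes only `transpose_b` and omits `single_thread_blocking_override`. Leaving `elementwise_epilogue_fn` at its default writes the raw product to `c_buf`.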