Python module
driver
Exposes APIs for interacting with hardware, such as allocating tensors on a GPU and moving tensors between the CPU and GPU. It provides interfaces for memory management, device properties, and hardware monitoring. Through these APIs, you can control data placement, track resource utilization, and configure device settings for optimal performance.
Accelerator()
Creates an accelerator device with the specified ID.
Provides access to GPU or other hardware accelerators in the system.
from max import driver
device = driver.Accelerator()
# Or specify GPU id
device = driver.Accelerator(id=0) # First GPU
device = driver.Accelerator(id=1) # Second GPU
# Get device id
device_id = device.id
CPU()
Creates a CPU device for the specified NUMA node.
from max import driver
# Create default CPU device
device = driver.CPU()
# Or specify NUMA node id if using NUMA architecture
device = driver.CPU(id=0) # First NUMA node
device = driver.CPU(id=1) # Second NUMA node
# Get device id
device_id = device.id
DLPackArray
class max.driver.DLPackArray(*args, **kwargs)
Device
class max.driver.Device(_device: Device)
A hardware device abstraction for computation.
Provides a unified interface for CPU and accelerator (e.g. GPU) devices.
accelerator()
Creates an accelerator device with the specified ID.
Provides access to GPU or other hardware accelerators in the system.
from max import driver
# Create default accelerator (usually first available GPU)
device = driver.Accelerator()
# Or specify GPU id
device = driver.Accelerator(id=0) # First GPU
device = driver.Accelerator(id=1) # Second GPU
# Get device id
device_id = device.id
api
property api: str
Provides the programming interface used by the device.
Returns:
The programming interface identifier used by the device. One of:
- cpu for host devices.
- cuda for NVIDIA GPUs.
- hip for AMD GPUs.
from max import driver
device = driver.CPU()
device.api
cpu()
Creates a CPU device for the specified NUMA node.
from max import driver
# Create default CPU device
device = driver.CPU()
# Or specify NUMA node id if using NUMA architecture
device = driver.CPU(id=0) # First NUMA node
device = driver.CPU(id=1) # Second NUMA node
# Get device id
device_id = device.id
id
property id: int
Provides the unique identifier for this device.
For CPU devices, this represents the NUMA node ID.
For GPU accelerators, this is the device ID relative to the host.
Combined with label, this forms a unique device identifier (e.g., gpu:0 or gpu:1).
from max import driver
device = driver.CPU()
device.id
is_compatible
property is_compatible: bool
Returns whether this device is compatible with MAX.
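For example, following the pattern of the other property examples (a minimal sketch):
from max import driver
device = driver.CPU()
device.is_compatible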
is_host
property is_host
Checks whether this device is the CPU (host) device.
from max import driver
device = driver.CPU()
device.is_host
label
property label: str
Provides the device type identifier.
from max import driver
device = driver.CPU()
device.label
stats
Provides real-time utilization data for the device.
from max import driver
device = driver.CPU()
device.stats
DeviceSpec
class max.driver.DeviceSpec(id: int, device_type: Literal['cpu', 'gpu'] = 'cpu')
Specification for a device, containing its ID and type.
This class provides a way to specify device parameters like ID and type (CPU/GPU) for creating Device instances.
accelerator()
static accelerator(id: int = -1)
Creates an accelerator (GPU) device specification.
cpu()
static cpu(id: int = -1)
Creates a CPU device specification.
device_type
device_type: Literal['cpu', 'gpu'] = 'cpu'
Type of specified device.
id
id: int
Provided id for this device.
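A minimal sketch of constructing device specifications with the static helpers above (the default id of -1 is taken from the signatures):
from max import driver
# Specification for a CPU device (id defaults to -1)
cpu_spec = driver.DeviceSpec.cpu()
# Specification for the first GPU
gpu_spec = driver.DeviceSpec.accelerator(id=0)
cpu_spec.device_type # 'cpu'
gpu_spec.id # 0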
MemMapTensor
class max.driver.MemMapTensor(filename: PathLike, dtype: DType, shape: Sequence[int], mode='r+', offset=0)
Create a memory-mapped tensor from a binary file on disk.
The constructor argument semantics follow those of np.memmap.
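A minimal sketch, assuming a binary file of float32 values already exists on disk (the filename and mode here are illustrative; modes follow np.memmap):
from max import driver
from max.dtype import DType
# Map an existing binary file as a 1024x1024 float32 tensor without loading it into memory
weights = driver.MemMapTensor("weights.bin", dtype=DType.float32, shape=(1024, 1024), mode="r")
weights.read_only # True for read-only modes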
read_only
property read_only: bool
Tensor
class max.driver.Tensor(shape: ~typing.Sequence[int], dtype: ~max.dtype.dtype.DType, device: ~max.driver.driver.Device = Device(_device=<max._driver.Device object>))
Device-resident tensor representation. Allocates memory onto a given device with the provided shape and dtype. Tensors can be sliced to provide strided views of the underlying memory, but any tensors input into model execution must be contiguous. Does not currently support setting items across multiple indices, but does support numpy-style slicing.
Parameters:
- dtype – DType of tensor
- shape – Tuple of positive, non-zero integers denoting the tensor shape.
- device – Device to allocate tensor onto.
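A minimal allocation sketch:
from max import driver
from max.dtype import DType
# Allocate an uninitialized 2x3 float32 tensor on the host CPU
tensor = driver.Tensor([2, 3], dtype=DType.float32, device=driver.CPU())
tensor.rank # 2
tensor.num_elements # 6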
contiguous()
contiguous() → Tensor
Creates a contiguous copy of the parent tensor.
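A sketch using the numpy-style slicing described above (the stepped slice is assumed to produce a non-contiguous view):
from max import driver
from max.dtype import DType
tensor = driver.Tensor([4, 4], dtype=DType.float32, device=driver.CPU())
strided = tensor[::2, :] # strided view of every other row
dense = strided.contiguous() # contiguous copy suitable for model execution
dense.is_contiguous # True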
copy()
Create a deep copy on an optionally given device.
If a device is None (default), a copy is created on the same device.
from max import driver
from max.dtype import DType
cpu_tensor = driver.Tensor([2, 3], dtype=DType.bfloat16, device=driver.CPU())
cpu_copy = cpu_tensor.copy()
device
property device: Device
Device on which tensor is resident.
dtype
property dtype: DType
DType of constituent elements in tensor.
element_size
property element_size: int
Return the size of the element type in bytes.
from_dlpack()
classmethod from_dlpack(arr: Any, *, copy: bool | None = None) → Tensor
Create a tensor from an object implementing the dlpack protocol.
This usually does not result in a copy, and the producer of the object retains ownership of the underlying memory.
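A sketch importing a NumPy array through DLPack (recent NumPy versions implement the protocol for host arrays):
import numpy as np
from max import driver
arr = np.arange(6, dtype=np.float32).reshape(2, 3)
tensor = driver.Tensor.from_dlpack(arr) # usually no copy; numpy retains ownership
tensor.shape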
from_numpy()
Creates a tensor from a provided numpy array on the host device.
The underlying data is not copied unless the array is noncontiguous. If it is, a contiguous copy will be returned.
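A minimal sketch:
import numpy as np
from max import driver
arr = np.ones((2, 3), dtype=np.float32)
tensor = driver.Tensor.from_numpy(arr) # wraps the contiguous array without copying
tensor.is_host # True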
is_contiguous
property is_contiguous: bool
Whether or not tensor is contiguously allocated in memory. Returns false if the tensor is a non-contiguous slice.
Currently, we consider certain situations that are contiguous as non-contiguous for the purposes of our engine, such as when a tensor has negative steps.
is_host
property is_host: bool
Whether or not tensor is host-resident. Returns false for GPU tensors, true for CPU tensors.
from max import driver
from max.dtype import DType
cpu_tensor = driver.Tensor([2, 3], dtype=DType.bfloat16, device=driver.CPU())
print(cpu_tensor.is_host)
item()
item() → Any
Returns the scalar value at a given location. Currently implemented only for zero-rank tensors. The return type is converted to a Python built-in type.
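For example, combined with scalar() below (a minimal sketch):
from max import driver
from max.dtype import DType
scalar = driver.Tensor.scalar(3.5, dtype=DType.float32, device=driver.CPU())
scalar.item() # 3.5 as a Python float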
num_elements
property num_elements: int
Returns the number of elements in this tensor.
Rank-0 tensors have 1 element by convention.
rank
property rank: int
Tensor rank.
scalar()
classmethod scalar(value: ~typing.Any, dtype: ~max.dtype.dtype.DType, device: ~max.driver.driver.Device = Device(_device=<max._driver.Device object>)) → Tensor
Create a scalar value of a given dtype and value.
shape
Shape of tensor.
to()
Return a tensor that’s guaranteed to be on the given device.
The tensor is only copied if the input device is different from the device upon which the tensor is already resident.
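A sketch of device placement, assuming to() takes the target Device and an accelerator is available:
from max import driver
from max.dtype import DType
cpu_tensor = driver.Tensor([2, 3], dtype=DType.float32, device=driver.CPU())
gpu_tensor = cpu_tensor.to(driver.Accelerator()) # copies to the GPU
same_tensor = cpu_tensor.to(driver.CPU()) # already resident, so no copy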
to_numpy()
to_numpy() → ndarray
Converts the tensor to a numpy array.
If the tensor is not on the host, an exception is raised.
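A minimal sketch with a host-resident tensor:
from max import driver
from max.dtype import DType
tensor = driver.Tensor.zeros([2, 3], dtype=DType.float32, device=driver.CPU())
arr = tensor.to_numpy()
arr.shape # (2, 3)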
view()
view(dtype: DType, shape: Sequence[int] | None = None) → Tensor
Return a new tensor with the given type and shape that shares the underlying memory.
If the shape is not given, it will be deduced if possible, or a ValueError is raised.
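A sketch reinterpreting the same memory under other dtypes (DType.int32 and DType.uint8 are assumed to be available element types):
from max import driver
from max.dtype import DType
tensor = driver.Tensor.zeros([2, 4], dtype=DType.float32, device=driver.CPU())
as_ints = tensor.view(DType.int32) # same element size, shape deduced as [2, 4]
as_bytes = tensor.view(DType.uint8, [2, 16]) # explicit shape for the 32 underlying bytes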
zeros()
classmethod zeros(shape: ~typing.Sequence[int], dtype: ~max.dtype.dtype.DType, device: ~max.driver.driver.Device = Device(_device=<max._driver.Device object>)) → Tensor
Allocates a tensor with all elements initialized to zero.
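A minimal sketch:
from max import driver
from max.dtype import DType
zeros = driver.Tensor.zeros([2, 3], dtype=DType.bfloat16, device=driver.CPU())
zeros.num_elements # 6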