-
Notifications
You must be signed in to change notification settings - Fork 23
Description
(torch2.9_cu128_py312) gpj@xhh:~$ conda list
# packages in environment at /home/gpj/miniconda3/envs/torch2.9_cu128_py312:
# Name Version Build Channel
_libgcc_mutex 0.1 main
_openmp_mutex 5.1 1_gnu
bzip2 1.0.8 h5eee18b_6
ca-certificates 2025.9.9 h06a4308_0
cuequivariance 0.7.0 pypi_0 pypi
cuequivariance-ops-cu12 0.7.0 pypi_0 pypi
cuequivariance-ops-torch-cu12 0.7.0 pypi_0 pypi
cuequivariance-torch 0.7.0 pypi_0 pypi
expat 2.7.1 h6a678d5_0
filelock 3.19.1 pypi_0 pypi
fsspec 2025.9.0 pypi_0 pypi
jinja2 3.1.6 pypi_0 pypi
ld_impl_linux-64 2.44 h153f514_2
libffi 3.4.4 h6a678d5_1
libgcc-ng 11.2.0 h1234567_1
libgomp 11.2.0 h1234567_1
libnsl 2.0.0 h5eee18b_0
libstdcxx-ng 11.2.0 h1234567_1
libuuid 1.41.5 h5eee18b_0
libxcb 1.17.0 h9b100fa_0
libzlib 1.3.1 hb25bd0a_0
markupsafe 2.1.5 pypi_0 pypi
mpmath 1.3.0 pypi_0 pypi
ncurses 6.5 h7934f7d_0
networkx 3.5 pypi_0 pypi
numpy 2.3.3 pypi_0 pypi
nvidia-cublas-cu12 12.8.4.1 pypi_0 pypi
nvidia-cuda-cupti-cu12 12.8.90 pypi_0 pypi
nvidia-cuda-nvrtc-cu12 12.8.93 pypi_0 pypi
nvidia-cuda-runtime-cu12 12.8.90 pypi_0 pypi
nvidia-cudnn-cu12 9.10.2.21 pypi_0 pypi
nvidia-cufft-cu12 11.3.3.83 pypi_0 pypi
nvidia-cufile-cu12 1.13.1.3 pypi_0 pypi
nvidia-curand-cu12 10.3.9.90 pypi_0 pypi
nvidia-cusolver-cu12 11.7.3.90 pypi_0 pypi
nvidia-cusparse-cu12 12.5.8.93 pypi_0 pypi
nvidia-cusparselt-cu12 0.7.1 pypi_0 pypi
nvidia-ml-py 13.580.82 pypi_0 pypi
nvidia-nccl-cu12 2.27.5 pypi_0 pypi
nvidia-nvjitlink-cu12 12.8.93 pypi_0 pypi
nvidia-nvshmem-cu12 3.3.20 pypi_0 pypi
nvidia-nvtx-cu12 12.8.90 pypi_0 pypi
openssl 3.0.18 hd6dcaed_0
opt-einsum 3.4.0 pypi_0 pypi
pillow 11.3.0 pypi_0 pypi
pip 25.2 pyhc872135_1
platformdirs 4.5.0 pypi_0 pypi
pthread-stubs 0.3 h0ce48e5_1
python 3.12.12 hd17a9e1_1
readline 8.3 hc2a1206_0
scipy 1.16.2 pypi_0 pypi
setuptools 80.9.0 py312h06a4308_0
sqlite 3.50.2 hb25bd0a_1
sympy 1.14.0 pypi_0 pypi
tk 8.6.15 h54e0aa7_0
torch 2.9.0+cu128 pypi_0 pypi
torchvision 0.24.0+cu128 pypi_0 pypi
tqdm 4.67.1 pypi_0 pypi
triton 3.5.0 pypi_0 pypi
typing-extensions 4.15.0 pypi_0 pypi
tzdata 2025b h04d1e81_0
wheel 0.45.1 py312h06a4308_0
xorg-libx11 1.8.12 h9b100fa_1
xorg-libxau 1.0.12 h9b100fa_0
xorg-libxdmcp 1.1.5 h9b100fa_0
xorg-xorgproto 2024.1 h5eee18b_1
xz 5.6.4 h5eee18b_1
zlib 1.3.1 hb25bd0a_0
(torch2.9_cu128_py312) gpj@xhh:~$ python -c "import cuequivariance_torch as cuet"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/gpj/miniconda3/envs/torch2.9_cu128_py312/lib/python3.12/site-packages/cuequivariance_torch/__init__.py", line 26, in <module>
from .primitives.transpose import TransposeSegments, TransposeIrrepsLayout
File "/home/gpj/miniconda3/envs/torch2.9_cu128_py312/lib/python3.12/site-packages/cuequivariance_torch/primitives/transpose.py", line 183, in <module>
from cuequivariance_ops_torch import segmented_transpose
File "/home/gpj/miniconda3/envs/torch2.9_cu128_py312/lib/python3.12/site-packages/cuequivariance_ops_torch/__init__.py", line 39, in <module>
from cuequivariance_ops_torch.fused_layer_norm_torch import layer_norm_transpose
File "/home/gpj/miniconda3/envs/torch2.9_cu128_py312/lib/python3.12/site-packages/cuequivariance_ops_torch/fused_layer_norm_torch.py", line 17, in <module>
from cuequivariance_ops.triton import (
File "/home/gpj/miniconda3/envs/torch2.9_cu128_py312/lib/python3.12/site-packages/cuequivariance_ops/triton/__init__.py", line 24, in <module>
from .tuning_decorator import autotune_aot
File "/home/gpj/miniconda3/envs/torch2.9_cu128_py312/lib/python3.12/site-packages/cuequivariance_ops/triton/tuning_decorator.py", line 17, in <module>
from .cache_manager import get_cache_manager
File "/home/gpj/miniconda3/envs/torch2.9_cu128_py312/lib/python3.12/site-packages/cuequivariance_ops/triton/cache_manager.py", line 255, in <module>
cache_manager = CacheManager()
^^^^^^^^^^^^^^
File "/home/gpj/miniconda3/envs/torch2.9_cu128_py312/lib/python3.12/site-packages/cuequivariance_ops/triton/cache_manager.py", line 110, in __init__
self.gpu_information = get_gpu_information()
^^^^^^^^^^^^^^^^^^^^^
File "/home/gpj/miniconda3/envs/torch2.9_cu128_py312/lib/python3.12/site-packages/cuequivariance_ops/triton/cache_manager.py", line 66, in get_gpu_information
power_limit = pynvml.nvmlDeviceGetPowerManagementLimit(handle)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/gpj/miniconda3/envs/torch2.9_cu128_py312/lib/python3.12/site-packages/pynvml.py", line 3561, in nvmlDeviceGetPowerManagementLimit
_nvmlCheckReturn(ret)
File "/home/gpj/miniconda3/envs/torch2.9_cu128_py312/lib/python3.12/site-packages/pynvml.py", line 1061, in _nvmlCheckReturn
raise NVMLError(ret)
pynvml.NVMLError_NotSupported: Not Supported