6 files changed, +8 -8 lines changed.
 {
   "cudaq": {
     "repository": "NVIDIA/cuda-quantum",
-    "ref": "051eed70cb1426a5522140183c8e16cf72d51835"
+    "ref": "4f284af95b252e69ab08ba4f1ef7fa1ea69dd66a"
   }
 }
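The JSON above pins the CUDA-Q dependency to an exact commit. A minimal sketch of how such a pin could be materialized locally, assuming a plain git clone; the local directory name and checkout flow are assumptions for illustration, not part of this change:

# Sketch: check out the pinned NVIDIA/cuda-quantum commit.
# The "cuda-quantum" directory name is an assumption.
git clone https://github.com/NVIDIA/cuda-quantum.git cuda-quantum
git -C cuda-quantum checkout 4f284af95b252e69ab08ba4f1ef7fa1ea69dd66a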
@@ -109,7 +109,7 @@ jobs:
       # Install the correct torch first.
       cuda_no_dot=$(echo ${{ matrix.cuda_version }} | sed 's/\.//')
       pip install torch==2.9.0 --index-url https://download.pytorch.org/whl/cu${cuda_no_dot}
-      pip install numpy pytest cupy-cuda${{ steps.config.outputs.cuda_major }}x cuquantum-cu${{ steps.config.outputs.cuda_major }} lightning ml_collections mpi4py transformers quimb opt_einsum nvidia-cublas cuquantum-python-cu${{ steps.config.outputs.cuda_major }}==25.09
+      pip install numpy pytest cupy-cuda${{ steps.config.outputs.cuda_major }}x cuquantum-cu${{ steps.config.outputs.cuda_major }} lightning ml_collections mpi4py transformers quimb opt_einsum nvidia-cublas cuquantum-python-cu${{ steps.config.outputs.cuda_major }}==25.09.1
       # The following tests are needed for docs/sphinx/examples/qec/python/tensor_network_decoder.py.
       if [ "$(uname -m)" == "x86_64" ]; then
         # Stim is not currently available on manylinux ARM wheels, so only
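For context, the cuda_no_dot line in this hunk only strips the dot from the matrix CUDA version so it can be spliced into the PyTorch wheel index URL. A standalone sketch of the same munging, where "12.8" is an assumed example value for matrix.cuda_version:

# Standalone illustration of the version-string munging used above.
# "12.8" is an assumed example value, not taken from this PR.
cuda_version="12.8"
cuda_no_dot=$(echo "${cuda_version}" | sed 's/\.//')   # -> 128
echo "https://download.pytorch.org/whl/cu${cuda_no_dot}"
# prints: https://download.pytorch.org/whl/cu128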

@@ -133,7 +133,7 @@ jobs:
       # Install the correct torch first.
       cuda_no_dot=$(echo ${{ matrix.cuda_version }} | sed 's/\.//')
       pip install torch==2.9.0 --index-url https://download.pytorch.org/whl/cu${cuda_no_dot}
-      pip install numpy pytest cupy-cuda${{ steps.config.outputs.cuda_major }}x cuquantum-cu${{ steps.config.outputs.cuda_major }} lightning ml_collections mpi4py transformers quimb opt_einsum nvidia-cublas cuquantum-python-cu${{ steps.config.outputs.cuda_major }}==25.09
+      pip install numpy pytest cupy-cuda${{ steps.config.outputs.cuda_major }}x cuquantum-cu${{ steps.config.outputs.cuda_major }} lightning ml_collections mpi4py transformers quimb opt_einsum nvidia-cublas cuquantum-python-cu${{ steps.config.outputs.cuda_major }}==25.09.1
       # The following tests are needed for docs/sphinx/examples/qec/python/tensor_network_decoder.py.
       if [ "$(uname -m)" == "x86_64" ]; then
         # Stim is not currently available on manylinux ARM wheels, so only

@@ -106,7 +106,7 @@ jobs:
       # Install the correct torch first.
       cuda_no_dot=$(echo ${{ matrix.cuda_version }} | sed 's/\.//')
       pip install torch==2.9.0 --index-url https://download.pytorch.org/whl/cu${cuda_no_dot}
-      pip install numpy pytest cupy-cuda${{ steps.config.outputs.cuda_major }}x cuquantum-cu${{ steps.config.outputs.cuda_major }} quimb opt_einsum nvidia-cublas cuquantum-python-cu${{ steps.config.outputs.cuda_major }}==25.09
+      pip install numpy pytest cupy-cuda${{ steps.config.outputs.cuda_major }}x cuquantum-cu${{ steps.config.outputs.cuda_major }} quimb opt_einsum nvidia-cublas cuquantum-python-cu${{ steps.config.outputs.cuda_major }}==25.09.1
       # The following tests are needed for docs/sphinx/examples/qec/python/tensor_network_decoder.py.
       if [ "$(uname -m)" == "x86_64" ]; then
         # Stim is not currently available on manylinux ARM wheels, so only

@@ -56,7 +56,7 @@ tensor_network_decoder = [
     "quimb",
     "opt_einsum",
     "torch",
-    "cuquantum-python-cu12==25.09"
+    "cuquantum-python-cu12==25.09.1"
 ]
 trt_decoder = [
     "tensorrt-cu12"
@@ -65,6 +65,6 @@ all = [
     "quimb",
     "opt_einsum",
     "torch",
-    "cuquantum-python-cu12==25.09",
+    "cuquantum-python-cu12==25.09.1",
     "tensorrt-cu12; platform_machine == 'x86_64'"
 ]
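These optional-dependency groups are what a user selects at install time. A hedged usage sketch; the distribution name "cudaq-qec" below is a placeholder, since the actual package name does not appear in this diff:

# Hypothetical: install the extras defined above.
# "cudaq-qec" is a placeholder distribution name not shown in this diff.
pip install "cudaq-qec[tensor_network_decoder]"
pip install "cudaq-qec[all]"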

@@ -56,7 +56,7 @@ tensor_network_decoder = [
     "quimb",
     "opt_einsum",
     "torch>=2.9.0",
-    "cuquantum-python-cu13==25.09"
+    "cuquantum-python-cu13==25.09.1"
 ]
 trt_decoder = [
     "tensorrt-cu13"
@@ -65,6 +65,6 @@ all = [
     "quimb",
     "opt_einsum",
     "torch>=2.9.0",
-    "cuquantum-python-cu13==25.09",
+    "cuquantum-python-cu13==25.09.1",
     "tensorrt-cu13"
 ]
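After any of these installs, the bumped pin can be sanity-checked with pip itself:

# Verify that the patched cuquantum-python pin actually resolved.
pip show cuquantum-python-cu13 | grep '^Version'
# expected output: Version: 25.09.1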