diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..d1a8192b1
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,13 @@
+[submodule "big-ann-benchmarks"]
+ path = big-ann-benchmarks
+ url = https://github.com/intellistream/big-ann-benchmarks.git
+[submodule "GTI"]
+ path = GTI
+ url = https://github.com/MingqiWang-coder/GTI-Graph-based-Tree-Index.git
+[submodule "DiskANN"]
+ path = DiskANN
+ url = https://github.com/MingqiWang-coder/DiskANN.git
+ branch = diskv2
+[submodule "IP-DiskANN"]
+ path = IP-DiskANN
+ url = https://github.com/intellistream/IP-DiskANN.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 98840c33d..f05d6a874 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,23 +1,24 @@
#set(CMAKE_C_COMPILER "/usr/bin/gcc-11")
#set(CMAKE_CXX_COMPILER "/usr/bin/g++-11")
#set(CMAKE_CUDA_HOST_COMPILER "${CMAKE_CXX_COMPILER}")
-cmake_minimum_required(VERSION 3.27)
+#cmake_minimum_required(VERSION 3.27)
+cmake_minimum_required(VERSION 3.14)
project(CANDYBENCH CXX)
-option(ENABLE_CUDA "Enable cuda" ON)
-message(STATUS "1.0 CUDA enabled: ${ENABLE_CUDA}")
-include (cmake/FindCuda.cmake)
+#option(ENABLE_CUDA "Enable cuda" ON)
+#message(STATUS "1.0 CUDA enabled: ${ENABLE_CUDA}")
+# include (cmake/FindCuda.cmake)
include (cmake/FindTorch.cmake)
-#set(CMAKE_CUDA_ARCHITECTURES "70;75;80")
-set(CMAKE_CUDA_ARCHITECTURES ALL)
find_package(Torch REQUIRED)
+include_directories(${Torch_INCLUDE_DIRS})
+include_directories("/usr/local/lib/python3.10/dist-packages/torch/include/torch/csrc/api/include")
+include_directories("/usr/local/lib/python3.10/dist-packages/torch/include")
+
find_package(Python3 REQUIRED COMPONENTS Development)
include_directories(${Python3_INCLUDE_DIRS})
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
-
-
include_directories (${gflags_INCLUDE_DIR})
set(CMAKE_VERBOSE_MAKEFILE OFF)
set(CMAKE_RULE_MESSAGES OFF)
@@ -35,16 +36,16 @@ include(cmake/default.cmake)
#test avx2
# Option to enable/disable CUDA
-message(STATUS "2.0 CUDA enabled: ${ENABLE_CUDA}")
-if (ENABLE_CUDA)
- enable_language(CUDA)
- set(CMAKE_CUDA_STANDARD 20)
- set(CMAKE_CUDA_ARCHITECTURES OFF)
- add_definitions(-DENABLE_CUDA=1)
- message(STATUS "CUDA is enabled")
-else()
- message(STATUS "CUDA is not enabled")
-endif ()
+#message(STATUS "2.0 CUDA enabled: ${ENABLE_CUDA}")
+#if (ENABLE_CUDA)
+# enable_language(CUDA)
+# set(CMAKE_CUDA_STANDARD 20)
+# set(CMAKE_CUDA_ARCHITECTURES OFF)
+# add_definitions(-DENABLE_CUDA=1)
+# message(STATUS "CUDA is enabled")
+#else()
+# message(STATUS "CUDA is not enabled")
+#endif ()
add_subdirectory(thirdparty/faiss)
@@ -52,15 +53,12 @@ add_subdirectory(thirdparty/faiss)
#target_compile_options(faiss PRIVATE "-fno-openmp")
set(LIBRARIES ${LIBRARIES} faiss)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
-set(LIBRARIES ${LIBRARIES} ${TORCH_LIBRARIES})
-
-# Set Optimization Flags
-set(CMAKE_CXX_FLAGS "-std=c++20 -Wall -Werror=return-type -Wno-interference-size")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")
set(CMAKE_CXX_FLAGS_DEBUG "-g -O0 -DNO_RACE_CHECK -DCANDY_DEBUG_MODE=1")
set(CMAKE_CXX_FLAGS_RELEASE "-Wno-ignored-qualifiers -Wno-sign-compare -O3")
-set(PROJECT_BINARY_DIR_RAW ${PROJECT_BINARY_DIR})
+set(PROJECT_BINARY_DIR_RAW ${PROJECT_BINARY_DIR})
# Valid values are "generic", "avx2", "avx512".
detect_avx512_support(AVX512_AVAILABLE)
@@ -311,12 +309,6 @@ set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
message(STATUS "CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}")
message(STATUS "CMAKE_CXX_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG}")
message(STATUS "CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE}")
-#pytorch
-#set(Torch_DIR "/home/tony/.local/lib/python3.10/site-packages/torch/share/cmake" )
-# Log4cc
-#find_package(Log4cxx REQUIRED)
-#include_directories(${Log4cxx_INCLUDE_DIR})
-#set(LIBRARIES ${LIBRARIES} ${Log4cxx_LIBRARY})
option(ENABLE_UNIT_TESTS "Enable unit tests" OFF)
@@ -331,10 +323,6 @@ foreach (dir ${dirs})
endforeach ()
-#add_subdirectory(pytorchNN)
-# Add Source Code
-
-
add_subdirectory(src)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/)
@@ -342,43 +330,42 @@ set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/)
# Add Library
get_sources(CANDY_SOURCE_FILES)
get_headers(CANDY_HEADER_FILES)
-
-if (ENABLE_CUDA)
- set_source_files_properties(src/CANDY/IndexTable.cpp
- PROPERTIES
- LANGUAGE CUDA
- )
-endif ()
-
-add_library(CANDYBENCH SHARED ${CANDY_SOURCE_FILES} ${CANDY_HEADER_FILES} ${CMAKE_CURRENT_BINARY_DIR}
- src/CANDY/IndexTable.cpp)
+add_library(CANDYBENCH SHARED ${CANDY_SOURCE_FILES} ${CANDY_HEADER_FILES} ${CMAKE_CURRENT_BINARY_DIR})
+#if (ENABLE_CUDA)
+# set_source_files_properties(src/CANDY/IndexTable.cpp
+# PROPERTIES
+# LANGUAGE CUDA
+# )
+#endif ()
+#
+#add_library(CANDYBENCH SHARED ${CANDY_SOURCE_FILES} ${CANDY_HEADER_FILES} ${CMAKE_CURRENT_BINARY_DIR}
+# src/CANDY/IndexTable.cpp)
set_property(TARGET CANDYBENCH PROPERTY CXX_STANDARD 20)
target_include_directories(CANDYBENCH PUBLIC "include")
-if (ENABLE_CUDA)
- set(LIBRARIES ${LIBRARIES} cublas cudart)
- set_target_properties(CANDYBENCH PROPERTIES
- CUDA_STANDARD 20
- CXX_STANDARD 20
- )
-else ()
- set_target_properties(CANDYBENCH PROPERTIES
- CXX_STANDARD 20
- )
-endif ()
+#if (ENABLE_CUDA)
+# set(LIBRARIES ${LIBRARIES} cublas cudart)
+# set_target_properties(CANDYBENCH PROPERTIES
+# CUDA_STANDARD 20
+# CXX_STANDARD 20
+# )
+#else ()
+# set_target_properties(CANDYBENCH PROPERTIES
+# CXX_STANDARD 20
+# )
+#endif ()
# 设置 MKL 库的路径
-set(MKL_INCLUDE_DIR "/usr/include/mkl")
-set(MKL_LIB_DIR "/usr/lib/x86_64-linux-gnu")
-#set(MPI_INCLUDE_PATH "/usr/include/openmpi-x86_64")
-#set(MPI_LIBRARIES "/usr/lib/x86_64-linux-gnu/openmpi/lib/libmpi.so")
-
+set(MKL_ROOT /opt/intel/oneapi/mkl/latest)
+set(MKL_INCLUDE_DIR /opt/intel/oneapi/mkl/latest/include)
+set(MKL_LIB_DIR /opt/intel/oneapi/mkl/latest/lib/intel64)
set(MKL_LIBRARIES
- "${MKL_LIB_DIR}/libmkl_intel_lp64.so"
- "${MKL_LIB_DIR}/libmkl_sequential.so"
- "${MKL_LIB_DIR}/libmkl_core.so"
+ "${MKL_LIB_DIR}/libmkl_intel_lp64.so"
+ "${MKL_LIB_DIR}/libmkl_sequential.so"
+ "${MKL_LIB_DIR}/libmkl_core.so"
)
+
target_include_directories(CANDYBENCH PUBLIC ${MKL_INCLUDE_DIR})
# MKL 和其他库的链接
target_link_libraries(CANDYBENCH PUBLIC
diff --git a/DiskANN b/DiskANN
new file mode 160000
index 000000000..b7a3b768f
--- /dev/null
+++ b/DiskANN
@@ -0,0 +1 @@
+Subproject commit b7a3b768f7f690f48765420fcfe6d76bfb661966
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 000000000..b6056298e
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,86 @@
+FROM ubuntu:22.04
+
+WORKDIR /app
+
+COPY . /app
+
+RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
+ apt-get install -y --no-install-recommends \
+ python3 python3-pip git build-essential \
+ liblapack-dev libblas-dev libopenblas-dev \
+ libboost-all-dev \
+ libnuma-dev \
+ libgflags-dev libgoogle-glog-dev \
+ swig \
+ libhdf5-dev \
+ libaio-dev \
+ libgoogle-perftools-dev \
+ libomp-dev \
+ libtbb-dev \
+ libarchive-dev \
+ libcurl4-openssl-dev \
+ wget \
+ curl \
+ gnupg \
+ libfmt-dev \
+ python3-dev \
+ libeigen3-dev \
+ libspdlog-dev \
+ pybind11-dev \
+ pkg-config \
+ zlib1g-dev \
+ libssl-dev \
+ gfortran \
+ && rm -rf /var/lib/apt/lists/* && \
+ ldconfig
+
+RUN wget https://github.com/Kitware/CMake/releases/download/v3.30.2/cmake-3.30.2-linux-x86_64.sh -O cmake.sh && \
+ chmod +x cmake.sh && \
+ ./cmake.sh --skip-license --prefix=/usr/local && \
+ rm cmake.sh && \
+ ln -sf /usr/local/bin/cmake /usr/bin/cmake
+
+RUN wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor -o /usr/share/keyrings/oneapi-archive-keyring.gpg && \
+ echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | tee /etc/apt/sources.list.d/oneAPI.list && \
+ apt-get update && \
+ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+ intel-oneapi-mkl-devel \
+ && rm -rf /var/lib/apt/lists/*
+
+ENV MKLROOT="/opt/intel/oneapi/mkl/latest"
+ENV LD_LIBRARY_PATH="${MKLROOT}/lib/intel64:${LD_LIBRARY_PATH}"
+
+RUN pip install --no-cache-dir \
+ torch==2.3.0+cpu \
+ torchvision==0.18.0+cpu \
+ torchaudio==2.3.0+cpu \
+ --index-url https://download.pytorch.org/whl/cpu
+
+ENV Torch_DIR="/usr/local/lib/python3.10/dist-packages/torch/share/cmake/Torch"
+
+WORKDIR /app
+RUN pip install .
+
+WORKDIR /app/GTI/GTI/extern_libraries/n2
+RUN mkdir -p build && make shared_lib
+
+WORKDIR /app/GTI/GTI
+RUN mkdir -p bin build && cd build && cmake -DCMAKE_BUILD_TYPE=Release .. && make -j && make install
+
+WORKDIR /app/DiskANN
+RUN mkdir -p build && cd build && \
+ cmake -DCMAKE_BUILD_TYPE=Release \
+ -DMKL_PATH=/opt/intel/oneapi/mkl/latest/lib/intel64 \
+ -DMKL_INCLUDE_PATH=/opt/intel/oneapi/mkl/latest/include \
+ .. && \
+ make -j && make install
+
+WORKDIR /app/IP-DiskANN
+RUN mkdir -p build && cd build && \
+ cmake -DCMAKE_BUILD_TYPE=Release \
+ -DMKL_PATH=/opt/intel/oneapi/mkl/latest/lib/intel64 \
+ -DMKL_INCLUDE_PATH=/opt/intel/oneapi/mkl/latest/include \
+ .. && \
+ make -j && make install
+
+CMD ["bash"]
\ No newline at end of file
diff --git a/GTI b/GTI
new file mode 160000
index 000000000..a21789816
--- /dev/null
+++ b/GTI
@@ -0,0 +1 @@
+Subproject commit a2178981626dce884c462e27477fc9ef9ad6ab1c
diff --git a/IP-DiskANN b/IP-DiskANN
new file mode 160000
index 000000000..ab06fe2c3
--- /dev/null
+++ b/IP-DiskANN
@@ -0,0 +1 @@
+Subproject commit ab06fe2c355a51d74cc3c4c06d0c50368edbf84d
diff --git a/README.md b/README.md
index 6d3eefafb..798ede746 100644
--- a/README.md
+++ b/README.md
@@ -1,22 +1,372 @@
-# CANDY
+# CANDOR-Bench: Benchmarking In-Memory Continuous ANNS under Dynamic Open-World Streams
-A library and benchmark suite for Approximate Nearest Neighbor Search (ANNS). This project is compatible with LibTorch.
+CANDOR-Bench (Continuous Approximate Nearest neighbor search under Dynamic Open-woRld Streams) is a benchmarking framework designed to evaluate in-memory ANNS algorithms under realistic, dynamic data stream conditions.
## Table of Contents
+- [Project Structure](#Project-Structure)
+- [Datasets and Algorithms](#Datasets-and-Algorithms)
+ - [Summary of Datasets](#Summary-of-Datasets)
+ - [Summary of Algorithms](#Summary-of-Algorithms)
- [Quick Start Guide](#quick-start-guide)
- - [Docker Support](#docker-support)
+ - [Build With Docker](#Build-With-Docker)
+ - [Usage](#Usage)
+
+- [Additional Information](#additional-information)
---
+## Project Structure
+
+```
+CANDY-Benchmark/
+├── benchmark/
+├── big-ann-benchmarks/ # Core benchmarking framework (Dynamic Open-World conditions)
+│ ├── benchmark/
+│ │ ├── algorithms/ # Concurrent Track
+│ │ ├── concurrent/ # Congestion Track
+│ │ ├── congestion/
+│ │ ├── main.py
+│ │ ├── runner.py
+│ │ └── ……
+│ ├── create_dataset.py
+│ ├── requirements_py3.10.txt
+│ ├── logging.conf
+│ ├── neurips21/
+│ ├── neurips23/ # NeurIPS'23 benchmark configurations and scripts
+│ │ ├── concurrent/ # Concurrent Track
+│ │ ├── congestion/ # Congestion Track
+│ │ ├── filter/
+│ │ ├── ood/
+│ │ ├── runbooks/ # Dynamic benchmark scenario definitions (e.g., T1, T3, etc.)
+│ │ ├── sparse/
+│ │ ├── streaming/
+│ │ └── ……
+│ └──……
+├── DiskANN/ # Integrated DiskANN-based algorithms
+├── GTI/ # Integrated GTI algorithm source
+├── IP-DiskANN/ # Integrated IP-DiskANN algorithm source
+├── src/ # Main algorithm implementations
+├── include/ # C++ header files
+├── thirdparty/ # External dependencies
+├── Dockerfile # Docker build recipe
+├── requirements.txt
+├── setup.py # Python package setup
+└── ……
+```
+## Datasets and Algorithms
+
+Our evaluation involves the following datasets and algorithms.
+
+### Summary of Datasets
+
+
+
+
+ Category
+ Name
+ Description
+ Dimension
+ Data Size
+ Query Size
+
+
+
+
+ Real-world
+ SIFT Image 128 1M 10K
+
+ OpenImagesStreaming Image 512 1M 10K
+ Sun Image 512 79K 200
+ SIFT100M Image 128 100M 10K
+ Trevi Image 4096 100K 200
+ Msong Audio 420 990K 200
+ COCO Multi-Modal 768 100K 500
+ Glove Text 100 1.192M 200
+ MSTuring Text 100 30M 10K
+
+ Synthetic
+ Gaussian i.i.d values Adjustable 500K 1000
+
+ Blob Gaussian Blobs 768 500K 1000
+ WTE Text 768 100K 100
+ FreewayML Constructed 128 100K 1K
+
+
+
+### Summary of Algorithms
+
+
+
+ Category
+ Algorithm Name
+ Description
+
+
+
+
+
+
+ Tree-based
+
+ SPTAG
+ Space-partitioning tree structure for efficient data segmentation.
+
+
+
+
+
+ LSH-based
+
+ LSH
+ Data-independent hashing to reduce dimensionality and approximate nearest neighbors.
+
+
+ LSHAPG
+ LSH-driven optimization using LSB-Tree to differentiate graph regions.
+
+
+
+
+
+ Clustering-based
+
+ PQ
+ Product quantization for efficient clustering into compact subspaces.
+
+
+ IVFPQ
+ Inverted index with product quantization for hierarchical clustering.
+
+
+ OnlinePQ
+ Incremental updates of centroids in product quantization for streaming data.
+
+
+ Puck
+ Non-orthogonal inverted indexes with multiple quantization optimized for large-scale datasets.
+
+
+ SCANN
+ Small-bit quantization to improve register utilization.
+
+
+
+
+
+ Graph-based
+
+ NSW
+ Navigable Small World graph for fast nearest neighbor search.
+
+
+ HNSW
+ Hierarchical Navigable Small World for scalable search.
+
+
+ FreshDiskANN
+ Streaming graph construction for large-scale proximity-based search with refined robust edge pruning.
+
+
+ MNRU
+ Enhances HNSW with efficient updates to prevent unreachable points in dynamic environments.
+
+
+ Cufe
+ Enhances FreshDiskANN with batched neighbor expansion.
+
+
+ Pyanns
+ Enhances FreshDiskANN with fix-sized huge pages for optimized memory access.
+
+
+ IPDiskANN
+ Enables efficient in-place deletions for FreshDiskANN, improving update performance without reconstructions.
+
+
+ GTI
+ Hybrid tree-graph indexing for efficient, dynamic high-dimensional search, with optimized updates and construction.
+
+
+ ParlayHNSW
+ Parallel, deterministic HNSW for improved scalability and performance.
+
+
+ ParlayVamana
+ Parallel, deterministic FreshDiskANN implementation using Vamana for graph construction, with performance improvement.
+
+
+
+
## Quick Start Guide
+---
+# 🚨🚨 Strong Recommendation: Use Docker! 🚨🚨
+
+> **We strongly recommend using Docker to build and run this project.**
+>
+> There are many algorithm libraries with complex dependencies. Setting up the environment locally can be difficult and error-prone.
+> **Docker provides a consistent and reproducible environment, saving you time and avoiding compatibility issues.**
+>
+> **Note:** Building the Docker image may take **10–20 minutes** depending on your network and hardware.
+
+---
+
+### Build With Docker
+To build the project using Docker, simply use the provided Dockerfile located in the root directory. This ensures a consistent and reproducible environment for all dependencies and build steps.
+
+1. To initialize and update all submodules in the project, you can run:
+```
+git submodule update --init --recursive
+```
+2. You can build the Docker image with:
+```
+docker build -t candor-bench .
+```
+3. Once the image is built, you can run a container from it using the following command.
+```
+docker run -it candor-bench
+```
+4. After entering the container, navigate to the project directory:
+```
+cd /app/big-ann-benchmarks
+```
+
+
+### Usage
+
+All the following operations are performed in the root directory of big-ann-benchmarks.
+
+#### 2.1 Preparing dataset
+Create a small, sample dataset. For example, to create a dataset with 10000 20-dimensional random floating point vectors, run:
+```
+python create_dataset.py --dataset random-xs
+```
+To see a complete list of datasets, run the following:
+```
+python create_dataset.py --help
+```
+
+#### 2.2 Running Algorithms on the **congestion** Track
+
+To evaluate an algorithm under the `congestion` track, use the following command:
+```bash
+python3 run.py \
+    --neurips23track congestion \
+    --algorithm "$ALGO" \
+    --nodocker \
+    --rebuild \
+    --runbook_path "$RUNBOOK_PATH" \
+    --dataset "$DS"
+```
+- `--algorithm "$ALGO"`: Name of the algorithm to evaluate.
+- `--dataset "$DS"`: Name of the dataset to use.
+- `--runbook_path "$RUNBOOK_PATH"`: Path to the runbook file describing the test scenario.
+- `--rebuild`: Rebuild the target before running.
+
+#### 2.3 Computing Ground Truth for Runbooks
+
+To compute ground truth for a runbook:
+1. **Clone and build the [DiskANN repository](https://github.com/Microsoft/DiskANN)**
+2. Use the provided script to compute ground truth at various checkpoints:
+```
+python3 benchmark/congestion/compute_gt.py \
+ --runbook "$PATH_TO_RUNBOOK" \
+ --dataset "$DATASET_NAME" \
+ --gt_cmdline_tool ~/DiskANN/build/apps/utils/compute_groundtruth
+```
+
+#### 2.4 Exporting Results
+1. To make the results available for post-processing, change permissions of the results folder
+```
+sudo chmod 777 -R results/
+```
+2. The following command will summarize all results files into a single csv file
+```
+python data_export.py --out "$OUT" --track congestion
+```
+The `--out` path "$OUT" should be adjusted according to the testing scenario. Common values include:
+- `gen`
+- `batch`
+- `event`
+- `conceptDrift`
+- `randomContamination`
+- `randomDrop`
+- `wordContamination`
+- `bulkDeletion`
+- `batchDeletion`
+- `multiModal`
+- ……
## Additional Information
@@ -161,253 +576,8 @@ Figures will be generated in the `figures` directory.
- [Generate Documentation](#generate-documentation)
- [Accessing Documentation](#accessing-documentation)
- [Known Issues](#known-issues)
+
---
-### Extra CMake Options
-
-You can set additional CMake options using `cmake -D=ON/OFF`:
-
-- `ENABLE_PAPI` (OFF by default)
- - Enables PAPI-based performance tools.
- - **Setup**:
- - Navigate to the `thirdparty` directory.
- - Run `installPAPI.sh` to enable PAPI support.
- - Alternatively, set `REBUILD_PAPI` to `ON`.
-- `ENABLE_HDF5` (OFF by default)
- - Enables loading data from HDF5 files.
- - The HDF5 source code is included; no extra dependency is required.
-- `ENABLE_PYBIND` (OFF by default)
- - Enables building Python bindings (PyCANDY).
- - Ensure the `pybind11` source code in the `thirdparty` folder is complete.
-
-### Manual Build Instructions
-
-#### Requirements
-
-- **Compiler**: G++11 or newer.
- - The default `gcc/g++` version on Ubuntu 22.04 (Jammy) is sufficient.
-- **BLAS and LAPACK**:
- ```shell
- sudo apt install liblapack-dev libblas-dev
- ```
-- **Graphviz (Optional)**:
- ```shell
- sudo apt-get install graphviz
- pip install torchviz
- ```
-
-#### Build Steps
-
-1. **Set the CUDA Compiler Path** (if using CUDA):
-
- ```shell
- export CUDACXX=/usr/local/cuda/bin/nvcc
- ```
-
-2. **Create Build Directory**:
-
- ```shell
- mkdir build && cd build
- ```
-
-3. **Configure CMake**:
-
- ```shell
- cmake -DCMAKE_PREFIX_PATH=`python3 -c 'import torch; print(torch.utils.cmake_prefix_path)'` ..
- ```
-
-4. **Build the Project**:
-
- ```shell
- make
- ```
-
-**For Debug Build**:
-
-```shell
-cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_PREFIX_PATH=`python3 -c 'import torch; print(torch.utils.cmake_prefix_path)'` ..
-make
-```
-
-#### CLion Build Tips
-
-- Manually retrieve the CMake prefix path:
-
- ```shell
- python3 -c 'import torch; print(torch.utils.cmake_prefix_path)'
- ```
-
-- Set the `-DCMAKE_PREFIX_PATH` in CLion's CMake settings.
-- Set the environment variable `CUDACXX` to `/usr/local/cuda/bin/nvcc` in CLion.
-
-### CUDA Installation (Optional)
-
-#### Install CUDA (if using CUDA-based Torch)
-
-Refer to the [NVIDIA CUDA Installation Guide](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#ubuntu) for more details.
-
-```shell
-wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.0-1_all.deb
-sudo dpkg -i cuda-keyring_1.0-1_all.deb
-sudo apt-get update
-sudo apt-get install cuda
-sudo apt-get install nvidia-gds
-sudo apt-get install libcudnn8 libcudnn8-dev libcublas-11-7
-```
-
-**Note**: Ensure CUDA is installed before installing CUDA-based Torch. Reboot your system after installation.
-
-#### CUDA on Jetson Devices
-
-- No need to install CUDA if using a pre-built JetPack on Jetson.
-- Ensure `libcudnn8` and `libcublas` are installed:
-
- ```shell
- sudo apt-get install libcudnn8 libcudnn8-dev libcublas-*
- ```
-
-### Torch Installation
-
-Refer to the [PyTorch Get Started Guide](https://pytorch.org/get-started/locally/) for more details.
-
-#### Install Python and Pip
-
-```shell
-sudo apt-get install python3 python3-pip
-```
-
-#### Install PyTorch
-
-- **With CUDA**:
-
- ```shell
- pip3 install torch==2.4.0 torchvision torchaudio
- ```
-
-- **Without CUDA**:
-
- ```shell
- pip3 install --ignore-installed torch==2.4.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
- ```
-
-**Note**: Conflict between `torch2.4.0+cpu` and `torchaudio+cpu` may occur with Python versions > 3.10.
-
-### PAPI Support (Optional)
-
-PAPI provides a consistent interface for collecting performance counter information.
-
-#### Build PAPI
-
-- Navigate to the `thirdparty` directory.
-- Run `installPAPI.sh`.
-- PAPI will be compiled and installed in `thirdparty/papi_build`.
-
-#### Verify PAPI Installation
-
-- Navigate to `thirdparty/papi_build/bin`.
-- Run `sudo ./papi_avail` to check available events.
-- Run `./papi_native_avail` to view native events.
-
-#### Enable PAPI in CANDY
-
-- Set `-DENABLE_PAPI=ON` when configuring CMake.
-- Add the following to your top-level config file:
-
- ```
- usePAPI,1,U64
- perfUseExternalList,1,U64
- ```
-
-- To specify custom event lists, set:
-
- ```
- perfListSrc,,String
- ```
-
-- Edit `perfLists/perfList.csv` in your build directory to include desired events.
-
-### Distributed CANDY with Ray (Optional)
-
-#### Build with Ray Support
-
-1. **Install Ray**:
-
- ```shell
- pip install ray==2.8.1 ray-cpp==2.8.1
- ```
-
-2. **Get Ray Library Path**:
-
- ```shell
- ray cpp --show-library-path
- ```
-
-3. **Set `RAYPATH` Environment Variable**:
-
- ```shell
- export RAYPATH=
- ```
-
-4. **Configure CMake**:
-
- ```shell
- cmake -DENABLE_RAY=ON ..
- ```
-
-#### Running with Ray
-
-- **Start the Head Node**:
-
- ```shell
- ray start --head
- ```
-
-- **Start Worker Nodes**:
-
- ```shell
- ray start --address :6379 --node-ip-address
- ```
-
-- **Run the Program**:
-
- ```shell
- export RAY_ADDRESS=:6379
- ./
- ```
-
-**Notes**:
-
-- Ensure the file paths and dependencies are identical across all nodes.
-- For different architectures, recompile the source code on each node.
-- `torch::Tensor` may not be serializable; consider using `std::vector` instead.
-
-#### Ray Dashboard (Optional)
-
-Refer to the [Ray Observability Guide](https://docs.ray.io/en/latest/ray-observability/getting-started.html#observability-getting-started) to set up a dashboard.
-
-### Local Documentation Generation (Optional)
-
-#### Install Required Packages
-
-```shell
-sudo apt-get install doxygen graphviz
-sudo apt-get install texlive-latex-base texlive-fonts-recommended texlive-fonts-extra texlive-latex-extra
-```
-
-#### Generate Documentation
-
-```shell
-./genDoc.SH
-```
-
-##### Accessing Documentation
-
-- **HTML Pages**: Located in `doc/html/index.html`.
-- **PDF Manual**: Found at `refman.pdf` in the root directory.
-
-### Known Issues
-
-- Conflicts may occur with certain versions of PyTorch and Python.
-
-
\ No newline at end of file
+
diff --git a/big-ann-benchmarks b/big-ann-benchmarks
new file mode 160000
index 000000000..9e34ab58e
--- /dev/null
+++ b/big-ann-benchmarks
@@ -0,0 +1 @@
+Subproject commit 9e34ab58e9ac227a4b9d0d227567974f27089846
diff --git a/cmake/FindCuda.cmake b/cmake/FindCuda.cmake
index ce07b3974..f42de92b6 100644
--- a/cmake/FindCuda.cmake
+++ b/cmake/FindCuda.cmake
@@ -42,7 +42,7 @@ function(find_valid_cuda MIN_CUDA_VERSION MAX_CUDA_VERSION)
# If no valid CUDA was found, print a warning
if (NOT VALID_CUDA_FOUND)
- set(ENABLE_CUDA OFF PARENT_SCOPE)
+# set(ENABLE_CUDA OFF PARENT_SCOPE)
message(WARNING "No valid CUDA compiler found in the range ${MIN_CUDA_VERSION} - ${MAX_CUDA_VERSION}. I don't think you can use CUDA...")
else()
set(ENV{CUDACXX} ${CUDA_COMPILER_PATH})
@@ -51,4 +51,5 @@ function(find_valid_cuda MIN_CUDA_VERSION MAX_CUDA_VERSION)
endfunction()
# Call the function to find valid CUDA compilers with a specific version range
-find_valid_cuda("11.0" "12.6")
+#find_valid_cuda("11.0" "12.6")
+find_valid_cuda("11.0" "12.5")
\ No newline at end of file
diff --git a/docker/Dockerfile b/docker/Dockerfile
deleted file mode 100644
index 8e9780412..000000000
--- a/docker/Dockerfile
+++ /dev/null
@@ -1,27 +0,0 @@
-# Base image with CUDA 11.7 and Python 3.10
-FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04
-
-# Install essential packages and dependencies
-RUN apt-get update && apt-get install -y --allow-change-held-packages \
- sudo \
- build-essential \
- cmake \
- curl \
- unzip \
- liblapack-dev \
- libblas-dev \
- libboost-dev \
- graphviz \
- python3 \
- python3-pip \
- && apt-get clean \
- && rm -rf /var/lib/apt/lists/*
-
-# Install Python dependencies
-RUN pip3 install matplotlib pandas==2.0.0
-
-# Set up CUDA environment variables
-ENV CUDACXX=/usr/local/cuda/bin/nvcc
-
-# Default command to run
-CMD ["bash"]
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
deleted file mode 100644
index 5c7f23738..000000000
--- a/docker/docker-compose.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-services:
- candy:
- build: ./
- environment:
- - CUDACXX=/usr/local/cuda/bin/nvcc
- volumes:
- - "..:/workspace"
- working_dir: /workspace
- entrypoint: /bin/bash
- stdin_open: true
- tty: true
diff --git a/docker/start.sh b/docker/start.sh
deleted file mode 100644
index 5acc08918..000000000
--- a/docker/start.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-# Build and run the Docker container
-docker-compose build
-docker compose run candy
-
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 000000000..710bf7b15
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,11 @@
+torch
+numpy
+cmake
+matplotlib>=3.5.0
+pandas
+scipy
+scikit-learn
+h5py
+pyyaml
+psutil
+jinja2
diff --git a/setup.py b/setup.py
index 61fe1fe24..2c61299c3 100644
--- a/setup.py
+++ b/setup.py
@@ -5,6 +5,7 @@
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import glob
+import platform
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=""):
@@ -18,12 +19,7 @@ def run(self):
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
- ", ".join(e.name for e in self.extensions))
-
- # Set environment variables
- os.environ['CUDACXX'] = '/usr/local/cuda/bin/nvcc'
- if sys.platform == 'linux':
- os.environ['LD_LIBRARY_PATH'] = '/path/to/custom/libs:' + os.environ.get('LD_LIBRARY_PATH', '')
+ ", ".join(e.name for e in self.extensions))
for ext in self.extensions:
self.build_extension(ext)
@@ -32,58 +28,120 @@ def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug
cfg = "Debug" if debug else "Release"
- os.system("python3 -c 'import torch;print(torch.utils.cmake_prefix_path)' >> 1.txt")
- with open('1.txt', 'r') as file:
- torchCmake = file.read().rstrip('\n')
- os.system('rm 1.txt')
- os.system('nproc >> 1.txt')
- with open('1.txt', 'r') as file:
- threads = file.read().rstrip('\n')
- threads = str(2)
- os.system('rm 1.txt')
- #os.system('cd thirdparty&&./makeClean.sh&&./installPAPI.sh')
- print(threads)
- cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
- '-DPYTHON_EXECUTABLE=' + sys.executable,
- '-DCMAKE_PREFIX_PATH='+torchCmake,
- '-DENABLE_HDF5=OFF',
- '-DENABLE_PYBIND=ON',
- '-DCMAKE_INSTALL_PREFIX=/usr/local/lib',
- '-DENABLE_PAPI=OFF',
- '-DENABLE_SPTAG=ON',
- '-DENABLE_PUCK=ON',
- '-DENABLE_DiskANN=ON',
- '-DPYBIND=ON',
- f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}{os.sep}",
- f"-DPYTHON_EXECUTABLE={sys.executable}",
- f"-DCMAKE_BUILD_TYPE={cfg}", # not used on MSVC, but no harm
- f"-DVERSION_INFO={self.distribution.get_version()}" # commented out, we want this set in the CMake file
- ]
-
- cfg = 'Debug' if self.debug else 'Release'
- cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
+
+ # Get PyTorch CMake path
+ try:
+ torch_cmake_prefix = os.environ.get('Torch_DIR')
+ if not torch_cmake_prefix:
+ torch_cmake_prefix = subprocess.check_output(
+ [sys.executable, "-c", "import torch; print(torch.utils.cmake_prefix_path)"],
+ text=True,
+ encoding='utf-8'
+ ).strip()
+ print(f"DEBUG: Using torch_cmake_prefix: {torch_cmake_prefix}")
+ except subprocess.CalledProcessError as e:
+ print(f"Error getting torch cmake path: {e}")
+ sys.exit(1)
+
+ # Get CPU core count
+ try:
+ threads_output = subprocess.check_output(["nproc"], text=True, encoding='utf-8').strip()
+ threads = threads_output if threads_output.isdigit() else "2"
+ except subprocess.CalledProcessError:
+ threads = "2"
+
+ print(f"Using {threads} build threads.")
+
+ cmake_args = [
+ f'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}{os.sep}',
+ f'-DPYTHON_EXECUTABLE={sys.executable}',
+ f'-DCMAKE_PREFIX_PATH={torch_cmake_prefix}',
+ f'-DCMAKE_BUILD_TYPE={cfg}',
+ f'-DVERSION_INFO={self.distribution.get_version()}',
+
+ '-DENABLE_HDF5=OFF',
+ '-DENABLE_PYBIND=ON',
+ '-DCMAKE_INSTALL_PREFIX=/usr/local/lib',
+ '-DENABLE_PAPI=OFF',
+ '-DENABLE_SPTAG=ON',
+ '-DENABLE_DiskANN=ON',
+ '-DPYBIND=ON',
+ ]
+
+ # MKL path
+ mkl_base_path = os.environ.get('MKLROOT')
+ if not mkl_base_path:
+ raise RuntimeError("MKLROOT environment variable is not set. Please ensure Intel oneAPI MKL is installed and MKLROOT is configured in Dockerfile.")
+
+ cmake_args.append(f'-DMKL_PATH={mkl_base_path}')
+ cmake_args.append(f'-DMKL_INCLUDE_PATH={mkl_base_path}/include')
+
+ print(f"DEBUG: Setting -DMKL_PATH={mkl_base_path}")
+ print(f"DEBUG: Setting -DMKL_INCLUDE_PATH={mkl_base_path}/include")
build_args = ['--config', cfg]
- build_args += ['--', '-j'+threads]
- if not os.path.exists(self.build_temp):
- os.makedirs(self.build_temp)
- subprocess.run(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp,check=True)
- subprocess.run(['cmake', '--build', '.'] + build_args, cwd=self.build_temp,check=True)
- # Now copy all *.so files from the build directory to the final installation directory
- so_files = glob.glob(os.path.join(self.build_temp, '*.so'))
- print("so_files:")
+ build_args += ['--', '-j' + threads]
+
+ self.build_temp = os.path.join(self.build_temp, ext.name)
+ os.makedirs(self.build_temp, exist_ok=True)
+ print(f"DEBUG: Using build directory: {self.build_temp}")
+
+ print(f"DEBUG: CMake configure command: {['cmake', ext.sourcedir] + cmake_args}")
+ try:
+ subprocess.run(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, check=True)
+ except subprocess.CalledProcessError as e:
+ print(f"CMake configuration failed with error: {e}")
+ print(f"STDOUT:\n{e.stdout.decode() if e.stdout else ''}")
+ print(f"STDERR:\n{e.stderr.decode() if e.stderr else ''}")
+ raise
+
+ print(f"DEBUG: CMake build command: {['cmake', '--build', '.'] + build_args}")
+ try:
+ result = subprocess.run(['cmake', '--build', '.'] + build_args, cwd=self.build_temp, capture_output=True, text=True)
+ print("--- CMake Build STDOUT ---")
+ print(result.stdout)
+ print("--- CMake Build STDERR ---")
+ print(result.stderr)
+ result.check_returncode()
+ except subprocess.CalledProcessError as e:
+ print(f"CMake build failed with error: {e}")
+ print(f"STDOUT:\n{e.stdout}")
+ print(f"STDERR:\n{e.stderr}")
+ raise
+
+ lib_name = "libCANDYBENCH.so"
+ py_lib_name = "PyCANDYAlgo.so"
+
+ so_files = glob.glob(os.path.join(self.build_temp, py_lib_name))
+ if not so_files:
+ so_files = glob.glob(os.path.join(self.build_temp, lib_name))
+
+ print(f"Discovered .so files in {self.build_temp}:")
print(so_files)
+
+ if not so_files:
+ print(f"WARNING: No .so files found in {self.build_temp}. Checking common subdirectories...")
+ so_files = glob.glob(os.path.join(self.build_temp, 'lib', '*.so'))
+ if not so_files:
+ so_files = glob.glob(os.path.join(self.build_temp, 'bin', '*.so'))
+ if not so_files:
+ print("WARNING: Still no .so files found in common subdirectories. Build might have failed or output path is unusual.")
+
+
for file in so_files:
+ target_path = os.path.join(extdir, os.path.basename(file))
+ print(f"Copying {file} to {target_path}")
shutil.copy(file, extdir)
+
setup(
name='PyCANDYAlgo',
version='0.1',
author='Your Name',
description='A simple python version of CANDY benchmark built with Pybind11 and CMake',
long_description='',
- ext_modules=[CMakeExtension('.')],
+ ext_modules=[CMakeExtension('PyCANDYAlgo', sourcedir=".")],
cmdclass={
'build_ext': CMakeBuild,
},
zip_safe=False,
-)
+)
\ No newline at end of file
diff --git a/src/CANDY/CMakeLists.txt b/src/CANDY/CMakeLists.txt
index 89d631f59..d184571f8 100644
--- a/src/CANDY/CMakeLists.txt
+++ b/src/CANDY/CMakeLists.txt
@@ -10,7 +10,7 @@ add_sources(
OnlinePQIndex.cpp
#OnlineIVFLSHIndex.cpp
#OnlineIVFL2HIndex.cpp
- # IndexTable.cpp
+ IndexTable.cpp
#PQIndex.cpp
HNSWNaiveIndex.cpp
FaissIndex.cpp
@@ -35,9 +35,9 @@ add_subdirectory(FlannIndex)
add_subdirectory(LSHAPGIndex)
add_subdirectory(FlatGPUIndex)
-if (ENABLE_CUDA)
- add_subdirectory(SONG)
-endif ()
+#if (ENABLE_CUDA)
+# add_subdirectory(SONG)
+#endif ()
if (ENABLE_RAY)
add_subdirectory(DistributedPartitionIndex)
diff --git a/src/CANDY/IndexTable.cpp b/src/CANDY/IndexTable.cpp
index f4444f265..1f5eecdf9 100644
--- a/src/CANDY/IndexTable.cpp
+++ b/src/CANDY/IndexTable.cpp
@@ -7,11 +7,11 @@
#include
#include
#include
-#include
+// #include
//#include
//#include
#include
-#include
+// #include
#include
#include
#include
@@ -21,7 +21,7 @@
//#include
#include
//#include
-#include
+// #include
//#include
#include
#include
@@ -35,9 +35,9 @@
#if CANDY_SPTAG == 1
#include
#endif
-#ifdef ENABLE_CUDA
-#include
-#endif
+//#ifdef ENABLE_CUDA
+// #include
+//#endif
namespace CANDY {
CANDY::IndexTable::IndexTable() {
indexMap["null"] = newAbstractIndex();
@@ -50,21 +50,21 @@ CANDY::IndexTable::IndexTable() {
//indexMap["onlineIVFLSH"] = newOnlineIVFLSHIndex();
//indexMap["onlineIVFL2H"] = newOnlineIVFL2HIndex();
//indexMap["PQ"] = newPQIndex();
- indexMap["HNSWNaive"] = newHNSWNaiveIndex();
- indexMap["NSW"] = newNSWIndex();
+ // indexMap["HNSWNaive"] = newHNSWNaiveIndex();
+ // indexMap["NSW"] = newNSWIndex();
indexMap["faiss"] = newFaissIndex();
//indexMap["yinYang"] = newYinYangGraphIndex();
//indexMap["yinYangSimple"] = newYinYangGraphSimpleIndex();
indexMap["congestionDrop"] = newCongestionDropIndex();
indexMap["bufferedCongestionDrop"] = newBufferedCongestionDropIndex();
indexMap["nnDescent"] = newNNDescentIndex();
- indexMap["Flann"] = newFlannIndex();
+ // indexMap["Flann"] = newFlannIndex();
indexMap["DPG"] = newDPGIndex();
indexMap["LSHAPG"] = newLSHAPGIndex();
- indexMap["flatGPU"] = newFlatGPUIndex();
-#ifdef ENABLE_CUDA
- indexMap["SONG"] = newSONG();
-#endif
+ // indexMap["flatGPU"] = newFlatGPUIndex();
+//#ifdef ENABLE_CUDA
+ // indexMap["SONG"] = newSONG();
+//#endif
#if CANDY_CL == 1
// indexMap["cl"] = newCLMMCPPAlgo();
#endif
diff --git a/src/PyCANDY.cpp b/src/PyCANDY.cpp
index 514cdb6e5..e75fc6219 100644
--- a/src/PyCANDY.cpp
+++ b/src/PyCANDY.cpp
@@ -19,15 +19,15 @@
#include
#endif
//#if CANDY_DiskANN == 1
-#include "defaults.h"
-#include "distance.h"
+#include
+#include
#include
#include
//#endif
#include
-#include
+// #include
namespace py = pybind11;
using namespace INTELLI;
@@ -382,44 +382,44 @@ PYBIND11_MODULE(PyCANDYAlgo, m) {
#endif
- auto m_puck = m.def_submodule("puck", "Puck Interface from Baidu.");
- py::class_>(m_puck, "PuckSearcher")
- .def(py::init<>())
- .def("init", &py_puck_api::PySearcher::init)
- .def("show",&py_puck_api::PySearcher::show)
- .def("build",&py_puck_api::PySearcher::build)
- .def("search",&py_puck_api::PySearcher::search)
- .def("batch_add",&py_puck_api::PySearcher::batch_add)
- .def("batch_delete",&py_puck_api::PySearcher::batch_delete);
-
- m_puck.def("update_gflag", &py_puck_api::update_gflag, "A function to update gflag");
-
-
- auto m_utils = m.def_submodule("utils", "Utility Classes from CANDY.");
- py::class_,std::shared_ptr>>(m_utils,"NumpyIdxPair")
- .def(py::init<>())
- .def(py::init, std::vector>())
- .def_readwrite("vectors", &NumpyIdxPair::vectors)
- .def_readwrite("idx", &NumpyIdxPair::idx);
-
- py::class_, std::shared_ptr>>(m_utils, "NumpyIdxQueue")
- .def(py::init())
- .def("push", &SPSCWrapperNumpy::push)
- .def("try_push", &SPSCWrapperNumpy::try_push)
- .def("front", &SPSCWrapperNumpy::front)
- .def("empty", &SPSCWrapperNumpy::empty)
- .def("size", &SPSCWrapperNumpy::size)
- .def("capacity", &SPSCWrapperNumpy::capacity)
- .def("pop", &SPSCWrapperNumpy::pop);
- py::class_>(m_utils, "IdxQueue")
- .def(py::init())
- .def("push", &SPSCWrapperIdx::push)
- .def("try_push", &SPSCWrapperIdx::try_push)
- .def("front", &SPSCWrapperIdx::front)
- .def("empty", &SPSCWrapperIdx::empty)
- .def("size", &SPSCWrapperIdx::size)
- .def("capacity", &SPSCWrapperIdx::capacity)
- .def("pop", &SPSCWrapperIdx::pop);
+ // auto m_puck = m.def_submodule("puck", "Puck Interface from Baidu.");
+ // py::class_>(m_puck, "PuckSearcher")
+ // .def(py::init<>())
+ // .def("init", &py_puck_api::PySearcher::init)
+ // .def("show",&py_puck_api::PySearcher::show)
+ // .def("build",&py_puck_api::PySearcher::build)
+ // .def("search",&py_puck_api::PySearcher::search)
+ // .def("batch_add",&py_puck_api::PySearcher::batch_add)
+ // .def("batch_delete",&py_puck_api::PySearcher::batch_delete);
+
+ // m_puck.def("update_gflag", &py_puck_api::update_gflag, "A function to update gflag");
+
+
+ // auto m_utils = m.def_submodule("utils", "Utility Classes from CANDY.");
+ // py::class_,std::shared_ptr>>(m_utils,"NumpyIdxPair")
+ // .def(py::init<>())
+ // .def(py::init, std::vector>())
+ // .def_readwrite("vectors", &NumpyIdxPair::vectors)
+ // .def_readwrite("idx", &NumpyIdxPair::idx);
+
+ // py::class_, std::shared_ptr>>(m_utils, "NumpyIdxQueue")
+ // .def(py::init())
+ // .def("push", &SPSCWrapperNumpy::push)
+ // .def("try_push", &SPSCWrapperNumpy::try_push)
+ // .def("front", &SPSCWrapperNumpy::front)
+ // .def("empty", &SPSCWrapperNumpy::empty)
+ // .def("size", &SPSCWrapperNumpy::size)
+ // .def("capacity", &SPSCWrapperNumpy::capacity)
+ // .def("pop", &SPSCWrapperNumpy::pop);
+ // py::class_>(m_utils, "IdxQueue")
+ // .def(py::init())
+ // .def("push", &SPSCWrapperIdx::push)
+ // .def("try_push", &SPSCWrapperIdx::try_push)
+ // .def("front", &SPSCWrapperIdx::front)
+ // .def("empty", &SPSCWrapperIdx::empty)
+ // .def("size", &SPSCWrapperIdx::size)
+ // .def("capacity", &SPSCWrapperIdx::capacity)
+ // .def("pop", &SPSCWrapperIdx::pop);
auto m_diskann = m.def_submodule("diskannpy","diskann interface from microsoft.");
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 385caf912..f2dfc6b4f 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -50,9 +50,9 @@ add_catch_test(nnDescent_test SystemTest/NNDescentIndexTest.cpp CANDYBENCH)
add_catch_test(kdTree_test SystemTest/KdTreeTest.cpp CANDYBENCH)
add_catch_test(dpgIndex_test SystemTest/DPGIndexTest.cpp CANDYBENCH)
add_catch_test(lshAPGIndex_test SystemTest/LSHAPGIndexTest.cpp CANDYBENCH)
-if(ENABLE_CUDA)
- add_catch_test(song_test SystemTest/SONGTest.cu CANDYBENCH)
-endif ()
+#if(ENABLE_CUDA)
+# add_catch_test(song_test SystemTest/SONGTest.cu CANDYBENCH)
+#endif ()
if (ENABLE_SPTAG)
add_catch_test(sptagIndex_test SystemTest/SPTAGIndexTest.cpp CANDYBENCH)
endif ()