diff --git a/.gitignore b/.gitignore index 673241656d..20d21f1506 100644 --- a/.gitignore +++ b/.gitignore @@ -23,7 +23,7 @@ PET*.ESMF_LogFile /examples/ /src/Infrastructure/IO/PIO/Build/ /src/Infrastructure/IO/PIO/Install/ - +/holding # protex generated files # ########################## *_ccapi.tex diff --git a/ParallelIO/.github/workflows/autotools.yml b/ParallelIO/.github/workflows/autotools.yml new file mode 100644 index 0000000000..8708fed114 --- /dev/null +++ b/ParallelIO/.github/workflows/autotools.yml @@ -0,0 +1,66 @@ +name: autotools_ubuntu_latest + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + build: + + runs-on: ubuntu-latest + + env: + CC: mpicc + FC: mpifort + CPPFLAGS: "-I/usr/include -I/usr/local/include -I/home/runner/pnetcdf/include" + LDFLAGS: "-L/home/runner/pnetcdf/lib" + PNETCDF_VERSION: 1.12.3 + FCFLAGS: "-fallow-argument-mismatch" + steps: + - uses: actions/checkout@v3 + - name: Installs + run: | + set -x + # sudo gem install apt-spy2 + # sudo apt-spy2 check + # sudo apt-spy2 fix --commit + # after selecting a specific mirror, we need to run 'apt-get update' + sudo apt-get update + sudo apt-get install netcdf-bin + sudo apt-get install libnetcdf-dev + sudo apt-get install doxygen + sudo apt-get install graphviz + sudo apt-get install wget + sudo apt-get install gfortran + sudo apt-get install libjpeg-dev + sudo apt-get install libz-dev + sudo apt-get install openmpi-bin + sudo apt-get install libopenmpi-dev + - name: cache-pnetcdf + id: cache-pnetcdf + uses: actions/cache@v3 + with: + path: ~/pnetcdf + key: pnetcdf-${{ runner.os }}-${{ env.PNETCDF_VERSION }} + + - name: build-pnetcdf + if: steps.cache-pnetcdf.outputs.cache-hit != 'true' + run: | + set -x + wget https://parallel-netcdf.github.io/Release/pnetcdf-${{ env.PNETCDF_VERSION }}.tar.gz &> /dev/null + tar -xzvf pnetcdf-${{ env.PNETCDF_VERSION }}.tar.gz + pushd pnetcdf-${{ env.PNETCDF_VERSION }} + ./configure --prefix=/home/runner/pnetcdf --enable-shared --disable-cxx + make + sudo make install + popd + + - name: autoreconf + run: autoreconf -i + - name: configure + run: ./configure --enable-fortran --with-mpiexec='mpiexec --oversubscribe' + #run: ./configure --enable-fortran --enable-docs --with-mpiexec='mpiexec --oversubscribe' + - name: make check + run: make -j check diff --git a/ParallelIO/.github/workflows/cmake.yml b/ParallelIO/.github/workflows/cmake.yml new file mode 100644 index 0000000000..8d7443c2c6 --- /dev/null +++ b/ParallelIO/.github/workflows/cmake.yml @@ -0,0 +1,64 @@ +name: cmake + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + build: + + runs-on: ubuntu-latest + + env: + CC: mpicc + FC: mpifort + CPPFLAGS: "-I/usr/include -I/usr/local/include -I/home/runner/pnetcdf/include" + LDFLAGS: "-L/home/runner/pnetcdf/lib" + LD_LIBRARY_PATH: "/home/runner/pnetcdf/lib" + PNETCDF_VERSION: 1.12.3 + FCFLAGS: "-fallow-argument-mismatch" + steps: + - uses: actions/checkout@v3 + - name: Installs + run: | + set -x + #sudo gem install apt-spy2 + #sudo apt-spy2 check + #sudo apt-spy2 fix --commit + # after selecting a specific mirror, we need to run 'apt-get update' + sudo apt-get update + sudo apt-get install netcdf-bin libnetcdf-dev doxygen graphviz wget gfortran libjpeg-dev libz-dev openmpi-bin libopenmpi-dev cmake + + - name: cache-pnetcdf + id: cache-pnetcdf + uses: actions/cache@v3 + with: + path: ~/pnetcdf + key: pnetcdf-${{ runner.os }}-${{ env.PNETCDF_VERSION }} + + - name: build-pnetcdf + if: 
steps.cache-pnetcdf.outputs.cache-hit != 'true' + run: | + set -x + wget https://parallel-netcdf.github.io/Release/pnetcdf-${{ env.PNETCDF_VERSION }}.tar.gz &> /dev/null + tar -xzvf pnetcdf-${{ env.PNETCDF_VERSION }}.tar.gz + pushd pnetcdf-${{ env.PNETCDF_VERSION }} + ./configure --prefix=/home/runner/pnetcdf --enable-shared --disable-cxx + make + sudo make install + popd + + - name: cmake build + run: | + set -x + mkdir build + cd build + cmake -Wno-dev -DNetCDF_C_LIBRARY=/usr/lib/x86_64-linux-gnu/libnetcdf.so \ + -DNetCDF_C_INCLUDE_DIR=/usr/include -DPnetCDF_PATH='/home/runner/pnetcdf' \ + -DPIO_ENABLE_FORTRAN=Off \ + -DPIO_ENABLE_LOGGING=On -DPIO_ENABLE_TIMING=Off -DMPIEXEC_PREFLAGS="--oversubscribe" .. + make VERBOSE=1 + make tests VERBOSE=1 + ctest -VV diff --git a/ParallelIO/.github/workflows/cmake_ubuntu_latest.yml b/ParallelIO/.github/workflows/cmake_ubuntu_latest.yml new file mode 100644 index 0000000000..d998aae307 --- /dev/null +++ b/ParallelIO/.github/workflows/cmake_ubuntu_latest.yml @@ -0,0 +1,68 @@ +--- # cmake build without netcdf integration +name: cmake_ubuntu-latest + +on: + push: + branches: + - master + pull_request: + branches: + - master + +jobs: + build: + + runs-on: ubuntu-latest + + env: + CC: mpicc + FC: mpifort + CPPFLAGS: "-I/usr/include -I/usr/local/include -I/home/runner/pnetcdf/include" + LDFLAGS: "-L/home/runner/pnetcdf/lib" + LD_LIBRARY_PATH: "/home/runner/pnetcdf/lib" + PNETCDF_VERSION: 1.12.3 + FCFLAGS: "-fallow-argument-mismatch" + steps: + - uses: actions/checkout@v3 + - name: Installs + run: | + set -x + #sudo gem install apt-spy2 + #sudo apt-spy2 check + #sudo apt-spy2 fix --commit + # after selecting a specific mirror, we need to run 'apt-get update' + sudo apt-get update + sudo apt-get install netcdf-bin libnetcdf-dev doxygen graphviz wget gfortran libjpeg-dev libz-dev openmpi-bin libopenmpi-dev + + - name: cache-pnetcdf + id: cache-pnetcdf + uses: actions/cache@v3 + with: + path: ~/pnetcdf + key: pnetcdf-${{ runner.os }}-${{ env.PNETCDF_VERSION }} + + - name: build-pnetcdf + if: steps.cache-pnetcdf.outputs.cache-hit != 'true' + run: | + set -x + wget https://parallel-netcdf.github.io/Release/pnetcdf-${{ env.PNETCDF_VERSION }}.tar.gz &> /dev/null + tar -xzvf pnetcdf-${{ env.PNETCDF_VERSION }}.tar.gz + pushd pnetcdf-${{ env.PNETCDF_VERSION }} + ./configure --prefix=/home/runner/pnetcdf --enable-shared --disable-cxx + make + sudo make install + popd + + - name: cmake build + run: | + set -x + mkdir build + cd build + cmake -Wno-dev -DPIO_ENABLE_NETCDF_INTEGRATION=OFF \ + -DNetCDF_C_LIBRARY=/usr/lib/x86_64-linux-gnu/libnetcdf.so \ + -DNetCDF_C_INCLUDE_DIR=/usr/include -DPnetCDF_PATH='/home/runner/pnetcdf' \ + -DPIO_ENABLE_FORTRAN=OFF \ + -DPIO_ENABLE_LOGGING=On -DPIO_ENABLE_TIMING=Off -DMPIEXEC_PREFLAGS="--oversubscribe" .. 
+ make VERBOSE=1 + make tests VERBOSE=1 + ctest -VV diff --git a/ParallelIO/.github/workflows/netcdf_hdf5_no_pnetcdf_ncint_mpich.yml b/ParallelIO/.github/workflows/netcdf_hdf5_no_pnetcdf_ncint_mpich.yml new file mode 100644 index 0000000000..6a8fb24f05 --- /dev/null +++ b/ParallelIO/.github/workflows/netcdf_hdf5_no_pnetcdf_ncint_mpich.yml @@ -0,0 +1,119 @@ +name: netcdf_hdf5_no_pnetcdf_ncint_mpich +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + build: + + runs-on: ubuntu-latest + + env: + CPPFLAGS: "-I/home/runner/mpich/include -I/home/runner/hdf5/include -I/home/runner/netcdf-c/include -I/home/runner/netcdf-fortran/include" + LDFLAGS: "-L/home/runner/mpich/lib -L/home/runner/hdf5/lib -L/home/runner/netcdf-c/lib -L/home/runner/netcdf-fortran/lib" + # Note issue https://github.com/NCAR/ParallelIO/issues/1889 netcdf integration currently only works with netcdf 4.7.4 + NETCDF_C_VERSION: 4.7.4 + NETCDF_F_VERSION: 4.5.4 + MPICH_VERSION: 4.0.3 + HDF5_VERSION_MAJOR: 1.12 + HDF5_VERSION_PATCH: 2 + FFLAGS: "-fallow-argument-mismatch" + FCFLAGS: "-fallow-argument-mismatch" + steps: + - uses: actions/checkout@v3 + - name: Installs + run: | + sudo apt-get install doxygen graphviz wget gfortran libjpeg-dev libz-dev + - name: cache-mpich + id: cache-mpich + uses: actions/cache@v3 + with: + path: ~/mpich + key: mpich-${{ runner.os }}-${{ env.MPICH_VERSION }} + + - name: build-mpich + if: steps.cache-mpich.outputs.cache-hit != 'true' + run: | + wget http://www.mpich.org/static/downloads/${{ env.MPICH_VERSION }}/mpich-${{ env.MPICH_VERSION }}.tar.gz &> /dev/null + tar -xzf mpich-${{ env.MPICH_VERSION }}.tar.gz + pushd mpich-${{ env.MPICH_VERSION }} + ./configure --prefix=/home/runner/mpich + make + sudo make install + popd + - name: cache-hdf5 + id: cache-hdf5 + uses: actions/cache@v3 + with: + path: ~/hdf5 + key: hdf5-${{ runner.os }}-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }}-mpich-${{ env.MPICH_VERSION }} + + - name: build-hdf5 + if: steps.cache-hdf5.outputs.cache-hit != 'true' + run: | + set -x + export PATH="/home/runner/mpich/bin:$PATH" + export CC=/home/runner/mpich/bin/mpicc + wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-${{ env.HDF5_VERSION_MAJOR }}/hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }}/src/hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }}.tar.gz &> /dev/null + tar -xzf hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }}.tar.gz + pushd hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }} + ./configure --prefix=/home/runner/hdf5 --enable-parallel --disable-tools --disable-fortran --disable-cxx --enable-parallel-tests + make + sudo make install + popd + - name: cache-netcdf-c + id: cache-netcdf-c + uses: actions/cache@v3 + with: + path: ~/netcdf-c + key: netcdf-c-${{ runner.os }}-${{ env.NETCDF_C_VERSION }}-mpich-${{ env.MPICH_VERSION }}-hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }} + + - name: build-netcdf-c + if: steps.cache-netcdf-c.outputs.cache-hit != 'true' + run: | + set -x + export PATH="/home/runner/mpich/bin:$PATH" + export CC=/home/runner/mpich/bin/mpicc + wget https://github.com/Unidata/netcdf-c/archive/refs/tags/v${{ env.NETCDF_C_VERSION }}.tar.gz + tar -xzf v${{ env.NETCDF_C_VERSION }}.tar.gz &> /dev/null + pushd netcdf-c-${{ env.NETCDF_C_VERSION }} + ./configure --prefix=/home/runner/netcdf-c --disable-dap --disable-utilities + make -j + sudo make install + popd + + - name: cache-netcdf-fortran + id: cache-netcdf-fortran + uses: 
actions/cache@v3 + with: + path: ~/netcdf-fortran + key: netcdf-fortran-${{ runner.os }}-${{ env.NETCDF_F_VERSION }}-mpich-${{ env.MPICH_VERSION }}-hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }} + + - name: build-netcdf-fortran + if: steps.cache-netcdf-fortran.outputs.cache-hit != 'true' + run: | + set -x + export PATH="/home/runner/mpich/bin:$PATH" + export CC=/home/runner/mpich/bin/mpicc + wget https://github.com/Unidata/netcdf-fortran/archive/refs/tags/v${{ env.NETCDF_F_VERSION }}.tar.gz + tar -zxf v${{ env.NETCDF_F_VERSION }}.tar.gz &> /dev/null + pushd netcdf-fortran-${{ env.NETCDF_F_VERSION }} + ./configure --prefix=/home/runner/netcdf-fortran + make -j + sudo make install + popd + + - name: autotools build + run: | + set -x + gcc --version + export PATH=/home/runner/mpich/bin:/home/runner/netcdf-c/bin:$PATH + export CC=/home/runner/mpich/bin/mpicc + export FC=/home/runner/mpich/bin/mpifort + autoreconf -i + ./configure --enable-fortran --enable-netcdf-integration --disable-pnetcdf + make -j check + + diff --git a/ParallelIO/.github/workflows/netcdf_hdf5_pnetcdf_ncint_mpich_asan.yml b/ParallelIO/.github/workflows/netcdf_hdf5_pnetcdf_ncint_mpich_asan.yml new file mode 100644 index 0000000000..7ee67f90f2 --- /dev/null +++ b/ParallelIO/.github/workflows/netcdf_hdf5_pnetcdf_ncint_mpich_asan.yml @@ -0,0 +1,162 @@ +name: netcdf_hdf5_pnetcdf_ncint_mpich_asan +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + build: + + runs-on: ubuntu-latest + + env: + CPPFLAGS: "-I/home/runner/mpich/include -I/home/runner/hdf5/include -I/home/runner/netcdf-c/include -I/home/runner/netcdf-fortran/include -I/home/runner/pnetcdf/include" + LDFLAGS: "-L/home/runner/mpich/lib -L/home/runner/hdf5/lib -L/home/runner/netcdf-c/lib -L/home/runner/netcdf-fortran/lib -L/home/runner/pnetcdf/lib" + # Note issue https://github.com/NCAR/ParallelIO/issues/1889 netcdf integration only currently works with netcdf-c 4.7.4 + NETCDF_C_VERSION: 4.7.4 + NETCDF_F_VERSION: 4.5.4 + PNETCDF_VERSION: 1.12.3 + MPICH_VERSION: 4.0.3 + HDF5_VERSION_MAJOR: 1.12 + HDF5_VERSION_PATCH: 2 + FCFLAGS: "-fallow-argument-mismatch" + FFLAGS: "-fallow-argument-mismatch" + steps: + - uses: actions/checkout@v3 + - name: Installs + run: | + sudo apt-get install doxygen graphviz wget gfortran libjpeg-dev libz-dev + - name: cache-mpich + id: cache-mpich + uses: actions/cache@v3 + with: + path: ~/mpich + key: mpich-${{ runner.os }}-${{ env.MPICH_VERSION }} + + - name: build-mpich + if: steps.cache-mpich.outputs.cache-hit != 'true' + run: | + wget http://www.mpich.org/static/downloads/${{ env.MPICH_VERSION }}/mpich-${{ env.MPICH_VERSION }}.tar.gz &> /dev/null + tar -xzf mpich-${{ env.MPICH_VERSION }}.tar.gz + pushd mpich-${{ env.MPICH_VERSION }} + ./configure --prefix=/home/runner/mpich + make + sudo make install + popd + - name: cache-hdf5 + id: cache-hdf5 + uses: actions/cache@v3 + with: + path: ~/hdf5 + key: hdf5-${{ runner.os }}-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }}-mpich-${{ env.MPICH_VERSION }} + + - name: build-hdf5 + if: steps.cache-hdf5.outputs.cache-hit != 'true' + run: | + set -x + export PATH="/home/runner/mpich/bin:$PATH" + export CC=/home/runner/mpich/bin/mpicc + wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-${{ env.HDF5_VERSION_MAJOR }}/hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }}/src/hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }}.tar.gz &> /dev/null + tar -xzf hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ 
env.HDF5_VERSION_PATCH }}.tar.gz + pushd hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }} + ./configure --prefix=/home/runner/hdf5 --enable-parallel --disable-tools --disable-fortran --disable-cxx --enable-parallel-tests + make + sudo make install + popd + - name: cache-netcdf-c + id: cache-netcdf-c + uses: actions/cache@v3 + with: + path: ~/netcdf-c + key: netcdf-c-${{ runner.os }}-${{ env.NETCDF_C_VERSION }}-mpich-${{ env.MPICH_VERSION }}-hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }} + + - name: build-netcdf-c + if: steps.cache-netcdf-c.outputs.cache-hit != 'true' + run: | + set -x + export PATH="/home/runner/mpich/bin:$PATH" + export CC=/home/runner/mpich/bin/mpicc + wget https://github.com/Unidata/netcdf-c/archive/refs/tags/v${{ env.NETCDF_C_VERSION }}.tar.gz + tar -xzf v${{ env.NETCDF_C_VERSION }}.tar.gz + pushd netcdf-c-${{ env.NETCDF_C_VERSION }} + ./configure --prefix=/home/runner/netcdf-c --disable-dap --disable-utilities + make -j + sudo make install + popd + - name: cache-netcdf-fortran + id: cache-netcdf-fortran + uses: actions/cache@v3 + with: + path: ~/netcdf-fortran + key: netcdf-fortran-${{ runner.os }}-${{ env.NETCDF_F_VERSION }}-mpich-${{ env.MPICH_VERSION }}-hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }} + + - name: build-netcdf-fortran + if: steps.cache-netcdf-fortran.outputs.cache-hit != 'true' + run: | + set -x + export PATH="/home/runner/mpich/bin:$PATH" + export CC=/home/runner/mpich/bin/mpicc + wget https://github.com/Unidata/netcdf-fortran/archive/refs/tags/v${{ env.NETCDF_F_VERSION }}.tar.gz + tar -xzf v${{ env.NETCDF_F_VERSION }}.tar.gz + pushd netcdf-fortran-${{ env.NETCDF_F_VERSION }} + ./configure --prefix=/home/runner/netcdf-fortran + make -j + sudo make install + popd + - name: cache-pnetcdf + id: cache-pnetcdf + uses: actions/cache@v3 + with: + path: ~/pnetcdf + key: pnetcdf-${{ runner.os }}-${{ env.PNETCDF_VERSION }}-mpich-${{ env.MPICH_VERSION }} + + - name: build-pnetcdf + if: steps.cache-pnetcdf.outputs.cache-hit != 'true' + run: | + set -x + export PATH="/home/runner/mpich/bin:$PATH" + export CC=/home/runner/mpich/bin/mpicc + wget https://parallel-netcdf.github.io/Release/pnetcdf-${{ env.PNETCDF_VERSION }}.tar.gz &> /dev/null + tar -xzf pnetcdf-${{ env.PNETCDF_VERSION }}.tar.gz + pushd pnetcdf-${{ env.PNETCDF_VERSION }} + ./configure --prefix=/home/runner/pnetcdf --enable-shared --disable-cxx + make + sudo make install + popd + - name: cmake build + run: | + set -x + gcc --version + echo 'export PATH=/home/runner/mpich/bin:$PATH' > .bashrc + source .bashrc + export CC=/home/runner/mpich/bin/mpicc + export FC=/home/runner/mpich/bin/mpifort + export CFLAGS="-g -O0 -fsanitize=address -fno-omit-frame-pointer -static-libasan" + export FCFLAGS="$FCFLAGS -g -O0 -fsanitize=address -fno-omit-frame-pointer -static-libasan" + export LDFLAGS="$LDFLAGS -static-libasan" + export LD_LIBRARY_PATH="/home/runner/netcdf-c/lib:/home/runner/mpich/lib:/home/runner/hdf5/lib:/home/runner/netcdf-fortran/lib:/home/runner/pnetcdf/lib:$LD_LIBRARY_PATH" + export ASAN_OPTIONS="detect_odr_violation=0" + mkdir build + cd build + cmake -Wno-dev -DNetCDF_C_LIBRARY=/home/runner/netcdf-c/lib/libnetcdf.so -DNetCDF_C_INCLUDE_DIR=/home/runner/netcdf-c/include -DPnetCDF_PATH='/home/runner/pnetcdf' -DPIO_ENABLE_FORTRAN=Off -DPIO_ENABLE_LOGGING=On -DPIO_ENABLE_TIMING=Off .. 
|| (cat CMakeFiles/CMakeOutput.log && cat CMakeFiles/CMakeError.log) + make VERBOSE=1 + make tests VERBOSE=1 +# ctest -VV + - name: autotools build + run: | + set -x + gcc --version + echo 'export PATH=/home/runner/mpich/bin:$PATH' > .bashrc + source .bashrc + export CC=/home/runner/mpich/bin/mpicc + export FC=/home/runner/mpich/bin/mpifort + export CFLAGS="-g -O0 -fsanitize=address -fno-omit-frame-pointer -static-libasan" + export FCFLAGS="$FCFLAGS -g -O0 -fsanitize=address -fno-omit-frame-pointer -static-libasan" + export LDFLAGS="$LDFLAGS -static-libasan" + export ASAN_OPTIONS="detect_odr_violation=0" + autoreconf -i + ./configure --enable-fortran --enable-netcdf-integration + make -j check + + diff --git a/ParallelIO/.github/workflows/netcdf_pnetcdf_openmpi.yml b/ParallelIO/.github/workflows/netcdf_pnetcdf_openmpi.yml new file mode 100644 index 0000000000..4ab12970de --- /dev/null +++ b/ParallelIO/.github/workflows/netcdf_pnetcdf_openmpi.yml @@ -0,0 +1,156 @@ +name: netcdf_pnetcdf_openmpi + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + build: + + runs-on: ubuntu-latest + + env: + CPPFLAGS: "-I/home/runner/openmpi/include -I/home/runner/hdf5/include -I/home/runner/netcdf-c/include -I/home/runner/netcdf-fortran/include -I/home/runner/pnetcdf/include" + LDFLAGS: "-L/home/runner/openmpi/lib -L/home/runner/hdf5/lib -L/home/runner/netcdf-c/lib -L/home/runner/netcdf-fortran/lib -L/home/runner/pnetcdf/lib" + NETCDF_C_VERSION: 4.9.0 + NETCDF_F_VERSION: 4.6.0 + OPENMPI_VERSION_MAJOR: 4.1 + OPENMPI_VERSION_PATCH: 4 + PNETCDF_VERSION: 1.12.3 + HDF5_VERSION_PATCH: 2 + HDF5_VERSION_MAJOR: 1.12 + FCFLAGS: "-fallow-argument-mismatch" + FFLAGS: "-fallow-argument-mismatch" + steps: + - uses: actions/checkout@v3 + - name: Installs + run: | + sudo apt-get install doxygen graphviz wget gfortran libjpeg-dev libz-dev + - name: cache-openmpi + id: cache-openmpi + uses: actions/cache@v3 + with: + path: ~/openmpi + key: openmpi-${{ runner.os }}-${{ env.OPENMPI_VERSION_MAJOR }}.${{ env.OPENMPI_VERSION_PATCH }} + + - name: build-openmpi + if: steps.cache-openmpi.outputs.cache-hit != 'true' + run: | + wget https://download.open-mpi.org/release/open-mpi/v${{ env.OPENMPI_VERSION_MAJOR }}/openmpi-${{ env.OPENMPI_VERSION_MAJOR }}.${{ env.OPENMPI_VERSION_PATCH }}.tar.gz &> /dev/null + tar -xzf openmpi-${{ env.OPENMPI_VERSION_MAJOR }}.${{ env.OPENMPI_VERSION_PATCH }}.tar.gz + pushd openmpi-${{ env.OPENMPI_VERSION_MAJOR }}.${{ env.OPENMPI_VERSION_PATCH }} + ./configure --prefix=/home/runner/openmpi + make + sudo make install + popd + - name: cache-hdf5 + id: cache-hdf5 + uses: actions/cache@v3 + with: + path: ~/hdf5 + key: hdf5-${{ runner.os }}-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }}-openmpi-${{ env.OPENMPI_VERSION_MAJOR }}.${{ env.OPENMPI_VERSION_PATCH }} + + - name: build-hdf5 + if: steps.cache-hdf5.outputs.cache-hit != 'true' + run: | + set -x + export PATH="/home/runner/openmpi/bin:$PATH" + export CC=/home/runner/openmpi/bin/mpicc + wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-${{ env.HDF5_VERSION_MAJOR }}/hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }}/src/hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }}.tar.gz &> /dev/null + tar -xzf hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }}.tar.gz + pushd hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }} + ./configure --prefix=/home/runner/hdf5 --enable-parallel --disable-tools --disable-fortran --disable-cxx --enable-parallel-tests + make + sudo make install + popd + - name:
cache-netcdf-c + id: cache-netcdf-c + uses: actions/cache@v3 + with: + path: ~/netcdf-c + key: netcdf-c-${{ runner.os }}-${{ env.NETCDF_C_VERSION }}-openmpi-${{ env.OPENMPI_VERSION_MAJOR }}.${{ env.OPENMPI_VERSION_PATCH }}-hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }} + + - name: build-netcdf-c + if: steps.cache-netcdf-c.outputs.cache-hit != 'true' + run: | + set -x + export PATH="/home/runner/openmpi/bin:$PATH" + export CC=/home/runner/openmpi/bin/mpicc + wget https://github.com/Unidata/netcdf-c/archive/refs/tags/v${{ env.NETCDF_C_VERSION }}.tar.gz + tar -xzf v${{ env.NETCDF_C_VERSION }}.tar.gz &> /dev/null + pushd netcdf-c-${{ env.NETCDF_C_VERSION }} + ./configure --prefix=/home/runner/netcdf-c --disable-dap --disable-utilities + make -j + sudo make install + popd + - name: cache-netcdf-fortran + id: cache-netcdf-fortran + uses: actions/cache@v3 + with: + path: ~/netcdf-fortran + key: netcdf-fortran-${{ runner.os }}-${{ env.NETCDF_F_VERSION }}-openmpi-${{ env.OPENMPI_VERSION_MAJOR }}.${{ env.OPENMPI_VERSION_PATCH }}-hdf5-${{ env.HDF5_VERSION_MAJOR }}.${{ env.HDF5_VERSION_PATCH }} + + - name: build-netcdf-fortran + if: steps.cache-netcdf-fortran.outputs.cache-hit != 'true' + run: | + set -x + export PATH="/home/runner/openmpi/bin:$PATH" + export CC=/home/runner/openmpi/bin/mpicc + wget https://github.com/Unidata/netcdf-fortran/archive/refs/tags/v${{ env.NETCDF_F_VERSION }}.tar.gz + tar -zxf v${{ env.NETCDF_F_VERSION }}.tar.gz &> /dev/null + pushd netcdf-fortran-${{ env.NETCDF_F_VERSION }} + ./configure --prefix=/home/runner/netcdf-fortran + make -j + sudo make install + popd + - name: cache-pnetcdf + id: cache-pnetcdf + uses: actions/cache@v3 + with: + path: ~/pnetcdf + key: pnetcdf-${{ runner.os }}-${{ env.PNETCDF_VERSION }}-openmpi-${{ env.OPENMPI_VERSION_MAJOR }}.${{ env.OPENMPI_VERSION_PATCH }} + + - name: build-pnetcdf + if: steps.cache-pnetcdf.outputs.cache-hit != 'true' + run: | + set -x + export PATH="/home/runner/openmpi/bin:$PATH" + export CC=/home/runner/openmpi/bin/mpicc + wget https://parallel-netcdf.github.io/Release/pnetcdf-${{ env.PNETCDF_VERSION }}.tar.gz &> /dev/null + tar -xzf pnetcdf-${{ env.PNETCDF_VERSION }}.tar.gz + pushd pnetcdf-${{ env.PNETCDF_VERSION }} + ./configure --prefix=/home/runner/pnetcdf --enable-shared --disable-cxx + make + sudo make install + popd + - name: autotools build + run: | + set -x + echo 'export PATH=/home/runner/openmpi/bin:$PATH' > .bashrc + source .bashrc + export PATH="/home/runner/openmpi/bin:$PATH" + export CC=/home/runner/openmpi/bin/mpicc + autoreconf -i + ./configure --with-mpiexec='mpiexec --oversubscribe' + cat config.h + make check + - name: cmake build + run: | + set -x + echo 'export PATH=/home/runner/openmpi/bin:$PATH' > .bashrc + source .bashrc + export CC=mpicc + mkdir build + cd build + export LD_LIBRARY_PATH="/home/runner/netcdf-c/lib:/home/runner/pnetcdf/lib:/home/runner/hdf5/lib:/home/runner/openmpi/lib:$LD_LIBRARY_PATH" + cmake -Wno-dev -DWITH_MPIEXEC='/home/runner/openmpi/bin/mpiexec;--oversubscribe' \ + -DNetCDF_C_LIBRARY=/home/runner/netcdf-c/lib/libnetcdf.so \ + -DNetCDF_C_INCLUDE_DIR=/home/runner/netcdf-c/include -DPnetCDF_PATH='/home/runner/pnetcdf' \ + -DPIO_ENABLE_FORTRAN=Off -DPIO_HDF5_LOGGING=On -DPIO_USE_MALLOC=On -DPIO_ENABLE_LOGGING=On \ + -DPIO_ENABLE_TIMING=Off .. 
|| (cat CMakeFiles/CMakeOutput.log && cat CMakeFiles/CMakeError.log) + cat config.h + make VERBOSE=1 + make tests VERBOSE=1 + ctest -VV diff --git a/ParallelIO/.github/workflows/strict_autotools_ubuntu_latest.yml b/ParallelIO/.github/workflows/strict_autotools_ubuntu_latest.yml new file mode 100644 index 0000000000..faf74ce40b --- /dev/null +++ b/ParallelIO/.github/workflows/strict_autotools_ubuntu_latest.yml @@ -0,0 +1,62 @@ +name: strict_autotools_ubuntu_latest + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + build: + + runs-on: ubuntu-latest + + env: + CC: mpicc + FC: mpifort + CPPFLAGS: "-I/usr/include -I/usr/local/include -I/home/runner/pnetcdf/include" + LDFLAGS: "-L/home/runner/pnetcdf/lib" + FCFLAGS: "-fallow-argument-mismatch" + PNETCDF_VERSION: 1.12.3 + steps: + - uses: actions/checkout@v3 + - name: Installs + run: | + set -x + #sudo gem install apt-spy2 + #sudo apt-spy2 check + #sudo apt-spy2 fix --commit + # after selecting a specific mirror, we need to run 'apt-get update' + sudo apt-get update + sudo apt-get install netcdf-bin libnetcdf-dev doxygen graphviz wget gfortran libjpeg-dev libz-dev openmpi-bin libopenmpi-dev + + - name: cache-pnetcdf + id: cache-pnetcdf + uses: actions/cache@v3 + with: + path: ~/pnetcdf + key: pnetcdf-${{ runner.os }}-${{ env.PNETCDF_VERSION }} + + - name: build-pnetcdf + if: steps.cache-pnetcdf.outputs.cache-hit != 'true' + run: | + set -x + wget https://parallel-netcdf.github.io/Release/pnetcdf-${{ env.PNETCDF_VERSION }}.tar.gz &> /dev/null + tar -xzvf pnetcdf-${{ env.PNETCDF_VERSION }}.tar.gz + pushd pnetcdf-${{ env.PNETCDF_VERSION }} + ./configure --prefix=/home/runner/pnetcdf --enable-shared --disable-cxx + make + sudo make install + popd + + - name: autoreconf + run: autoreconf -i + - name: build + run: | + set -x + export CFLAGS="-std=c99 -Wall" + export FFLAGS="-Wall" + export FCFLAGS="$FCFLAGS -Wall" + export DISTCHECK_CONFIGURE_FLAGS="--enable-fortran --with-mpiexec='/usr/bin/mpiexec --oversubscribe'" + ./configure + make -j distcheck diff --git a/ParallelIO/.gitignore b/ParallelIO/.gitignore new file mode 100644 index 0000000000..5055b0c6a8 --- /dev/null +++ b/ParallelIO/.gitignore @@ -0,0 +1,32 @@ +.project +html/ +*~ +\#*\# +*.o +Makefile.in +*.lo +*.la +Makefile +acinclude.m4 +aclocal.m4 +atconfig +autom4te.cache +config-h.in +config.* +configure +stamp-h1 +conftest* +missing +libtool +install-sh +ltmain.sh +compile +depcomp +build/ +.deps/ +.libs/ +m4/ +*.nc +*.log +*.gz +!/decomps/*/*.nc \ No newline at end of file diff --git a/ParallelIO/CMakeLists.txt b/ParallelIO/CMakeLists.txt new file mode 100644 index 0000000000..5abb33a588 --- /dev/null +++ b/ParallelIO/CMakeLists.txt @@ -0,0 +1,494 @@ +# This is part of the PIO library. + +# This is the CMake build file for the main directory. + +# Jim Edwards + +cmake_minimum_required (VERSION 3.5.2) +project (PIO C) + +# The project version number. +set(VERSION_MAJOR 2 CACHE STRING "Project major version number.") +set(VERSION_MINOR 5 CACHE STRING "Project minor version number.") +set(VERSION_PATCH 10 CACHE STRING "Project patch version number.") + +mark_as_advanced(VERSION_MAJOR VERSION_MINOR VERSION_PATCH) + +# Create version info in autotools parlance for pio_meta.h. +set(PIO_VERSION_MAJOR ${VERSION_MAJOR}) +set(PIO_VERSION_MINOR ${VERSION_MINOR}) +set(PIO_VERSION_PATCH ${VERSION_PATCH}) + +# This is needed for the libpio.settings file. 
+SET(PACKAGE_VERSION ${PIO_VERSION_MAJOR}.${PIO_VERSION_MINOR}.${PIO_VERSION_PATCH}) + +# This provides the cmake_print_variables() function for debugging. +include(CMakePrintHelpers) + +# Determine the configure date. +IF(DEFINED ENV{SOURCE_DATE_EPOCH}) + EXECUTE_PROCESS( + COMMAND "date" "-u" "-d" "@$ENV{SOURCE_DATE_EPOCH}" + OUTPUT_VARIABLE CONFIG_DATE + ) +ELSE() + EXECUTE_PROCESS( + COMMAND date + OUTPUT_VARIABLE CONFIG_DATE + ) +ENDIF() +IF(CONFIG_DATE) + string(STRIP ${CONFIG_DATE} CONFIG_DATE) +ENDIF() + +# A function used to create autotools-style 'yes/no' definitions. +# If a variable is set, 'yes' is returned. Otherwise, 'no' is +# returned. +# +# Also creates a version of the ret_val prefixed with 'PIO_', set to 1 +# when the feature is enabled and 0 otherwise, which is used to +# generate pio_meta.h. +FUNCTION(is_enabled feature ret_val) + IF(${feature}) + SET(${ret_val} "yes" PARENT_SCOPE) + SET("PIO_${ret_val}" 1 PARENT_SCOPE) + ELSE() + SET(${ret_val} "no" PARENT_SCOPE) + SET("PIO_${ret_val}" 0 PARENT_SCOPE) + ENDIF(${feature}) +ENDFUNCTION() + +# A function used to create autotools-style 'yes/no' definitions. +# If a variable is set, 'no' is returned. Otherwise, 'yes' is +# returned. +# +# Also creates a version of the ret_val prefixed with 'PIO_', set to 1 +# when the feature is disabled, which is used to generate pio_meta.h. +FUNCTION(is_disabled feature ret_val) + IF(${feature}) + SET(${ret_val} "no" PARENT_SCOPE) + ELSE() + SET(${ret_val} "yes" PARENT_SCOPE) + SET("PIO_${ret_val}" 1 PARENT_SCOPE) + ENDIF(${feature}) +ENDFUNCTION() + +# The size of the data buffer for write/read_darray(). +set(PIO_BUFFER_SIZE 134217728) + +#============================================================================== +# USER-DEFINED OPTIONS (set with "-DOPT=VAL" from command line) +#============================================================================== + +#===== Library Options ===== +option (PIO_ENABLE_FORTRAN "Enable the Fortran library builds" ON) +option (PIO_ENABLE_TIMING "Enable the use of the GPTL timing library" ON) +option (PIO_ENABLE_LOGGING "Enable debug logging (large output possible)" OFF) +option (PIO_ENABLE_DOC "Enable building PIO documentation" ON) +option (PIO_ENABLE_COVERAGE "Enable code coverage" OFF) +option (PIO_ENABLE_EXAMPLES "Enable PIO examples" ON) +option (PIO_ENABLE_NETCDF_INTEGRATION "Enable netCDF integration" OFF) +option (PIO_INTERNAL_DOC "Enable PIO developer documentation" OFF) +option (PIO_TEST_BIG_ENDIAN "Enable test to see if machine is big endian" ON) +option (PIO_USE_MPIIO "Enable support for MPI-IO auto detect" ON) +option (PIO_USE_MPISERIAL "Enable mpi-serial support (instead of MPI)" OFF) +option (PIO_USE_PNETCDF_VARD "Use pnetcdf put_vard" OFF) +option (WITH_PNETCDF "Require the use of PnetCDF" ON) +option (BUILD_SHARED_LIBS "Build shared libraries" OFF) + +if(APPLE) + # The linker on macOS does not include `common symbols` by default. + # Passing the -c flag includes them and fixes an error with undefined symbols (err_buffer, resultlen). + set(CMAKE_C_ARCHIVE_FINISH " -c ") +endif() + +# Set a variable that appears in the config.h.in file. +if(PIO_USE_PNETCDF_VARD) + set(USE_VARD 1) +else() + set(USE_VARD 0) +endif() + +# Set a variable that appears in the config.h.in file. +if(PIO_ENABLE_LOGGING) + set(ENABLE_LOGGING 1) + set(HAS_LOGGING "yes") +else() + set(ENABLE_LOGGING 0) + set(HAS_LOGGING "no") +endif() + +# Set a variable that appears in the config.h.in file.
+if(PIO_ENABLE_NETCDF_INTEGRATION) + set(NETCDF_INTEGRATION 1) +else() + set(NETCDF_INTEGRATION 0) +endif() + +if(PIO_USE_MPISERIAL) + set(USE_MPI_SERIAL 1) +else() + set(USE_MPI_SERIAL 0) +endif() + +#============================================================================== +# PREPEND TO CMAKE MODULE PATH +#============================================================================== + +#===== Local modules ===== +list (APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake) + +#===== External modules ===== +if (PIO_ENABLE_FORTRAN) + enable_language(Fortran) + if (NOT DEFINED USER_CMAKE_MODULE_PATH) + message (STATUS "Importing CMake_Fortran_utils") + execute_process( + COMMAND git clone https://github.com/CESM-Development/CMake_Fortran_utils + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + OUTPUT_QUIET + ERROR_QUIET) + find_path (USER_CMAKE_MODULE_PATH + NAMES mpiexec.cmake + HINTS ${CMAKE_BINARY_DIR}/CMake_Fortran_utils) + if (USER_CMAKE_MODULE_PATH) + message (STATUS "Importing CMake_Fortran_utils - success") + else () + message (FATAL_ERROR "Failed to import CMake_Fortran_utils") + endif () + endif () + set (USER_CMAKE_MODULE_PATH ${USER_CMAKE_MODULE_PATH} + CACHE STRING "Location of the CMake_Fortran_utils") + list (APPEND CMAKE_MODULE_PATH ${USER_CMAKE_MODULE_PATH}) +endif () + +INCLUDE (CheckTypeSize) + +#===== MPI ===== +if (PIO_USE_MPISERIAL) + find_package (MPISERIAL COMPONENTS C REQUIRED) + if (MPISERIAL_C_FOUND) + set (CMAKE_REQUIRED_INCLUDES ${MPISERIAL_C_INCLUDE_DIRS}) + endif () +else () + find_package (MPI REQUIRED) + set (CMAKE_REQUIRED_INCLUDES ${MPI_INCLUDE_PATH}) +endif () + +#SET(CMAKE_EXTRA_INCLUDE_FILES "mpi.h") +#check_type_size("MPI_Offset" SIZEOF_MPI_OFFSET) +#SET(CMAKE_EXTRA_INCLUDE_FILES) + +#===== Library Variables ===== +set (PIO_FILESYSTEM_HINTS IGNORE CACHE STRING "Filesystem hints (lustre or gpfs)") + +#===== Testing Options ===== +option (PIO_ENABLE_TESTS "Enable the testing builds" ON) +option (PIO_VALGRIND_CHECK "Enable memory leak check using valgrind" OFF) + +#============================================================================== +# BACKWARDS COMPATIBILITY +#============================================================================== + +# Old NETCDF_DIR variable --> NetCDF_PATH +if (DEFINED NETCDF_DIR) + set (NetCDF_PATH ${NETCDF_DIR} + CACHE STRING "Location of the NetCDF library installation") +endif () + +# Old PNETCDF_DIR variable --> PnetCDF_PATH +if (DEFINED PNETCDF_DIR) + set (PnetCDF_PATH ${PNETCDF_DIR} + CACHE STRING "Location of the PnetCDF library installation") +endif () + +#============================================================================== +# HELPFUL GLOBAL VARIABLES +#============================================================================== + +# System Name +string (TOUPPER "${CMAKE_SYSTEM_NAME}" CMAKE_SYSTEM_NAME_CAPS) +set (CMAKE_SYSTEM_DIRECTIVE "${CMAKE_SYSTEM_NAME_CAPS}" + CACHE STRING "System name preprocessor directive") + +# C Compiler Name +string (TOUPPER "${CMAKE_C_COMPILER_ID}" CMAKE_C_COMPILER_NAME) +if (CMAKE_C_COMPILER_NAME STREQUAL "XL") + set (CMAKE_C_COMPILER_NAME "IBM") +endif () +set (CMAKE_C_COMPILER_DIRECTIVE "CPR${CMAKE_C_COMPILER_NAME}" + CACHE STRING "C compiler name preprocessor directive") + +# Fortran Compiler Name +if (PIO_ENABLE_FORTRAN) + string (TOUPPER "${CMAKE_Fortran_COMPILER_ID}" CMAKE_Fortran_COMPILER_NAME) + if (CMAKE_Fortran_COMPILER_NAME STREQUAL "XL") + set (CMAKE_Fortran_COMPILER_NAME "IBM") + endif () + set (CMAKE_Fortran_COMPILER_DIRECTIVE 
"CPR${CMAKE_Fortran_COMPILER_NAME}" + CACHE STRING "Fortran compiler name preprocessor directive") +endif() +#============================================================================== +# SET CODE COVERAGE COMPILER FLAGS +#============================================================================== + +# Only support GNU compilers at this time +if (PIO_ENABLE_COVERAGE) + if (CMAKE_C_COMPILER_NAME STREQUAL "GNU") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fprofile-arcs -ftest-coverage") + else () + message (WARNING "The C compiler is non-GNU: coverage of C code could NOT be enabled") + endif () + if (CMAKE_Fortran_COMPILER_NAME STREQUAL "GNU") + set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fprofile-arcs -ftest-coverage") + else () + message (WARNING "The Fortran compiler is non-GNU: coverage of Fortran code could NOT be enabled") + endif () +endif () + +# Allow argument mismatch in gfortran versions > 10 for mpi library compatibility +if (CMAKE_C_COMPILER_NAME STREQUAL "GNU") + if ("${CMAKE_Fortran_COMPILER_VERSION}" VERSION_LESS 10) + else() + set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fallow-argument-mismatch") + endif() +endif() +# Include this so we can check values in netcdf_meta.h. +INCLUDE(CheckCSourceCompiles) +INCLUDE(FindNetCDF) +message("Fortran Library build is ${PIO_ENABLE_FORTRAN}") +if (PIO_ENABLE_FORTRAN) + find_package (NetCDF COMPONENTS C Fortran) + if (NOT NetCDF_Fortran_FOUND) + include(FindPkgConfig) + pkg_check_modules(NetCDF_Fortran REQUIRED IMPORTED_TARGET "netcdf-fortran") + endif() + if (WITH_PNETCDF) + find_package (PnetCDF COMPONENTS C Fortran) + endif() +else() + find_package (NetCDF REQUIRED COMPONENTS C) + if (WITH_PNETCDF) + find_package (PnetCDF COMPONENTS C) + endif() +endif() + +# Did we find pnetCDF? If so, set _PNETCDF in config.h. +if (PnetCDF_C_FOUND) + set(_PNETCDF 1) +endif () + +#============================================================================== +# INCLUDE SOURCE DIRECTORIES +#============================================================================== + +# Libraries +add_subdirectory (src) + +#============================================================================== +# TESTING TARGET +#============================================================================== + +# Custom "piotests" target (builds the test executables) +add_custom_target (tests) +if (PIO_ENABLE_FORTRAN) + add_dependencies (tests pioc piof) +else() + add_dependencies (tests pioc) +endif() + +# Custom "check" target that depends upon "tests" +add_custom_target (check COMMAND ${CMAKE_CTEST_COMMAND}) +add_dependencies (check tests) + +# Tests +if (PIO_ENABLE_TESTS) + enable_testing() + include (CTest) + add_subdirectory (tests) + if (PIO_ENABLE_EXAMPLES) + add_subdirectory (examples) + endif () +endif () + +# Documentation +if (PIO_ENABLE_DOC) + add_subdirectory (doc) +endif () + +SET(STATUS_PNETCDF ${PnetCDF_C_FOUND}) + +### +# Check to see if netcdf-4 capability is present in netcdf-c. +### +CHECK_C_SOURCE_COMPILES(" +#include +#if !NC_HAS_NC4 + choke me +#endif +int main() {return 0;}" HAVE_NETCDF4) + +### +# Check to see if netcdf-4 parallel I/O capability is present in +# netcdf-c. (Really we should be checking NC_HAS_PARALLEL4, but that +# was only recently introduced, so we will go with NC_HAS_PARALLEL.) +### +CHECK_C_SOURCE_COMPILES(" +#include +#if !NC_HAS_PARALLEL + choke me +#endif +int main() {return 0;}" HAVE_NETCDF_PAR) + +# Set this synonym for HAVE_NETCDF_PAR. It is defined in config.h. 
+if (HAVE_NETCDF_PAR) + set(_NETCDF4 1) +endif () + +### +# Check to see if szip write capability is present in netcdf-c. +### +SET(CMAKE_REQUIRED_INCLUDES ${NetCDF_C_INCLUDE_DIR}) +CHECK_C_SOURCE_COMPILES(" +#include <netcdf_meta.h> +#if !NC_HAS_SZIP_WRITE + choke me +#endif +int main() {return 0;}" HAVE_SZIP_WRITE) + +### +# Check to see if parallel filters are supported by HDF5/netcdf-c. +### +CHECK_C_SOURCE_COMPILES(" +#include <netcdf_meta.h> +#if !NC_HAS_PAR_FILTERS + choke me +#endif +int main() {return 0;}" HDF5_HAS_PAR_FILTERS) +if(HDF5_HAS_PAR_FILTERS) + set(HAVE_PAR_FILTERS 1) +else() + set(HAVE_PAR_FILTERS 0) +endif() + +### +# Check to see if this is netcdf-c-4.7.2, which won't work. +### +CHECK_C_SOURCE_COMPILES(" +#include <netcdf_meta.h> +#if NC_VERSION_MAJOR == 4 && NC_VERSION_MINOR == 7 && NC_VERSION_PATCH == 2 +#else + choke me +#endif +int main() {return 0;}" HAVE_472) +if (HAVE_472) + message (FATAL_ERROR "PIO cannot build with netcdf-c-4.7.2, please upgrade your netCDF library") +endif () + +### +# Check to see if dispatch table version 2 is supported for netcdf integration. +### +CHECK_C_SOURCE_COMPILES(" +#include <netcdf_meta.h> +#if NC_DISPATCH_VERSION != 2 + choke me +#endif +int main() {return 0;}" HAVE_DISPATCH2) + +if (NETCDF_INTEGRATION) + if (NOT HAVE_DISPATCH2) + message (FATAL_ERROR "Need newer version of netcdf-c for netcdf integration feature, please upgrade your netCDF library") + endif () + set(HAVE_NETCDF_INTEGRATION 1) +else () + set(HAVE_NETCDF_INTEGRATION 0) +endif () + +# Configure testing with MPIEXEC. +if (NOT WITH_MPIEXEC) + set(WITH_MPIEXEC mpiexec) +endif() +#set(MPIEXEC "${WITH_MPIEXEC}" CACHE INTERNAL "") +set(MPIEXEC "${WITH_MPIEXEC}") +set_property(GLOBAL PROPERTY WITH_MPIEXEC "${WITH_MPIEXEC}") + +##### +# Configure and print the libpio.settings file. +##### + +# Get system configuration. Use it to determine osname, os release, and cpu. +# These will be used when committing to CDash. +find_program(UNAME NAMES uname) +IF(UNAME) + macro(getuname name flag) + exec_program("${UNAME}" ARGS "${flag}" OUTPUT_VARIABLE "${name}") + endmacro(getuname) + getuname(osname -s) + getuname(osrel -r) + getuname(cpu -m) + set(TMP_BUILDNAME "${osname}-${osrel}-${cpu}") +ENDIF() + +# Set autoconf-style installation directory variables. +SET(prefix ${CMAKE_INSTALL_PREFIX}) +SET(exec_prefix ${CMAKE_INSTALL_PREFIX}) +SET(libdir ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}) +SET(includedir ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}) +SET(CC ${CMAKE_C_COMPILER}) + +# Set variables to mirror those used by autoconf. +# This way we don't need to maintain two separate template +# files. +SET(host_cpu "${cpu}") +SET(host_vendor "${osname}") +SET(host_os "${osrel}") +SET(abs_top_builddir "${CMAKE_CURRENT_BINARY_DIR}") +SET(abs_top_srcdir "${CMAKE_CURRENT_SOURCE_DIR}") + +SET(CC_VERSION "${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}") +SET(FC_VERSION "${CMAKE_Fortran_COMPILER_ID} ${CMAKE_Fortran_COMPILER_VERSION}") +# Build *FLAGS for libpio.settings. (CFLAGS, CPPFLAGS, FFLAGS promoted from src) +SET(LDFLAGS "${CMAKE_EXE_LINKER_FLAGS} ${CMAKE_EXE_LINKER_FLAGS_${CMAKE_BUILD_TYPE}}") + +is_disabled(BUILD_SHARED_LIBS enable_static) +is_enabled(BUILD_SHARED_LIBS enable_shared) + +is_enabled(USE_SZIP HAS_SZIP_WRITE) +is_enabled(STATUS_PNETCDF HAS_PNETCDF) +is_enabled(HAVE_H5Z_SZIP HAS_SZLIB) +is_enabled(HDF5_HAS_PAR_FILTERS HAS_PAR_FILTERS) +is_enabled(HAVE_NETCDF4 HAS_NETCDF4) +is_enabled(HAVE_NETCDF_PAR HAS_NETCDF4_PAR) +is_enabled(HAVE_NETCDF_INTEGRATION HAS_NETCDF_INTEGRATION) +is_enabled(PIO_ENABLE_FORTRAN HAS_PIO_FORTRAN) + +# Generate file from template.
+CONFIGURE_FILE("${CMAKE_CURRENT_SOURCE_DIR}/libpio.settings.in" + "${CMAKE_CURRENT_BINARY_DIR}/libpio.settings" + @ONLY) + +# Read in settings file, print out. +# Avoid using system-specific calls so that this +# might also work on Windows. +FILE(READ "${CMAKE_CURRENT_BINARY_DIR}/libpio.settings" + LIBPIO_SETTINGS) +MESSAGE(STATUS ${LIBPIO_SETTINGS}) + +# Install libpio.settings file into same location +# as the libraries. +INSTALL(FILES "${PIO_BINARY_DIR}/libpio.settings" + DESTINATION lib + COMPONENT libraries) + +##### +# Create pio_meta.h include file. +##### +configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/src/clib/pio_meta.h.in + ${CMAKE_CURRENT_BINARY_DIR}/src/clib/pio_meta.h @ONLY) + +# configure a header file to pass some of the CMake settings +# to the source code +configure_file ( + "${PROJECT_SOURCE_DIR}/cmake_config.h.in" + "${PROJECT_BINARY_DIR}/config.h" + ) diff --git a/ParallelIO/CTestConfig.cmake b/ParallelIO/CTestConfig.cmake new file mode 100644 index 0000000000..cd7099ae6b --- /dev/null +++ b/ParallelIO/CTestConfig.cmake @@ -0,0 +1,23 @@ +## This file should be placed in the root directory of your project. +## Then modify the CMakeLists.txt file in the root directory of your +## project to incorporate the testing dashboard. +## +## # The following are required to submit to the CDash dashboard: +## ENABLE_TESTING() +## INCLUDE(CTest) + +set (CTEST_PROJECT_NAME "PIO") +set (CTEST_NIGHTLY_START_TIME "00:00:00 EST") + +set (CTEST_DROP_METHOD "http") +if (DEFINED ENV{PIO_DASHBOARD_DROP_SITE}) + set (CTEST_DROP_SITE "$ENV{PIO_DASHBOARD_DROP_SITE}") +else () + set (CTEST_DROP_SITE "my.cdash.org") +endif () +if (DEFINED ENV{PIO_DASHBOARD_PROJECT_NAME}) + set (CTEST_DROP_LOCATION "/submit.php?project=$ENV{PIO_DASHBOARD_PROJECT_NAME}") +else () + set (CTEST_DROP_LOCATION "/submit.php?project=PIO") +endif () +set (CTEST_DROP_SITE_CDASH TRUE) diff --git a/ParallelIO/CTestScript.cmake b/ParallelIO/CTestScript.cmake new file mode 100644 index 0000000000..a8cd24d76c --- /dev/null +++ b/ParallelIO/CTestScript.cmake @@ -0,0 +1,195 @@ +#============================================================================== +# +# This is the CTest script for PIO builds and submission to the CTest +# Dashboard site: my.cdash.org. +# +# Example originally stolen from: +# http://www.vtk.org/Wiki/CTest:Using_CTEST_and_CDASH_without_CMAKE +#============================================================================== + +#--------------------------------------- +#-- User-defined setup from environment +#--------------------------------------- + +## -- CTest Dashboard Root Directory +if (DEFINED ENV{PIO_DASHBOARD_ROOT}) + set (CTEST_DASHBOARD_ROOT "$ENV{PIO_DASHBOARD_ROOT}") +else () + set (CTEST_DASHBOARD_ROOT "$ENV{HOME}/pio-dashboard") +endif () + +## -- Compiler ID +if (DEFINED ENV{PIO_COMPILER_ID}) + set (compid "$ENV{PIO_COMPILER_ID}") +else () + set (compid "?") +endif () + +## -- CTest Dashboard Build Group +set (CTEST_BUILD_GROUP "${CTEST_SCRIPT_ARG}") + +#--------------------------------------- +#-- Get the machine environment +#--------------------------------------- + +## -- Set hostname + +find_program (HOSTNAME_CMD NAMES hostname) +execute_process (COMMAND ${HOSTNAME_CMD} + OUTPUT_VARIABLE HOSTNAME + OUTPUT_STRIP_TRAILING_WHITESPACE) + +## -- Set hostname ID (e.g., alcf, nwsc, nersc, ...) 
+message ("hostname is ${HOSTNAME}") + +if (DEFINED HOSTNAME_ID) +else() +# UCAR/NWSC Machines +if (HOSTNAME MATCHES "^yslogin" OR + HOSTNAME MATCHES "^geyser" OR + HOSTNAME MATCHES "^caldera" OR + HOSTNAME MATCHES "^pronghorn") + set (HOSTNAME_ID "nwsc") +# New UCAR/NWSC SGI Machines +elseif (HOSTNAME MATCHES "^laramie" OR + HOSTNAME MATCHES "^chadmin" OR + HOSTNAME MATCHES "^cheyenne") + set (HOSTNAME_ID "nwscla") +# ALCF/Argonne Machines +elseif (HOSTNAME MATCHES "^mira" OR + HOSTNAME MATCHES "^cetus" OR + HOSTNAME MATCHES "^vesta" OR + HOSTNAME MATCHES "^cooley") + set (HOSTNAME_ID "alcf") +# NERSC Machines +elseif (HOSTNAME MATCHES "^edison" OR + HOSTNAME MATCHES "^cori" OR + HOSTNAME MATCHES "^nid") + set (HOSTNAME_ID "nersc") +# Blue Waters at NCSA +elseif (HOSTNAME MATCHES "^h2ologin" ) + set (HOSTNAME_ID "ncsa") +# CGD local linux cluster +elseif (HOSTNAME MATCHES "^hobart") + set (HOSTNAME_ID "cgd") +# Argonne Linux workstations +elseif (HOSTNAME MATCHES "^compute001" OR + HOSTNAME MATCHES "^thwomp" OR + HOSTNAME MATCHES "^stomp" OR + HOSTNAME MATCHES "^crush" OR + HOSTNAME MATCHES "^crank" OR + HOSTNAME MATCHES "^steamroller" OR + HOSTNAME MATCHES "^grind" OR + HOSTNAME MATCHES "^churn" OR + HOSTNAME MATCHES "^trounce" OR + HOSTNAME MATCHES "^thrash" OR + HOSTNAME MATCHES "^vanquish") + set (HOSTNAME_ID "anlworkstation") +else () + if (CMAKE_SYSTEM_NAME MATCHES "Catamount") + set (HOSTNAME_ID "ncsa") + else () + set (HOSTNAME_ID "unknown") + endif () +endif () +endif() +## -- Get system info + +find_program (UNAME NAMES uname) +function (getuname name flag) + execute_process (COMMAND ${UNAME} ${flag} + OUTPUT_VARIABLE res + OUTPUT_STRIP_TRAILING_WHITESPACE) + set (${name} ${res} PARENT_SCOPE) +endfunction () + +getuname (osname -s) +getuname (osrel -r) +getuname (cpu -m) + +## -- Git command +find_program (CTEST_GIT_COMMAND NAMES git) + +## -- make command +find_program (MAKE NAMES make) + +#----------------------------------------------------------- +#-- Generate build-specific information +#----------------------------------------------------------- + +## -- CTest Site Name + +set (CTEST_SITE "${HOSTNAME_ID}-${HOSTNAME}") + +## -- CTest Build Name + +set (CTEST_BUILD_NAME "${osname}-${osrel}-${cpu}-${compid}") + +## -- SRC Dir (where this script exists) +set (CTEST_SOURCE_DIRECTORY "${CTEST_SCRIPT_DIRECTORY}") + +## -- BIN Dir +set (CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}/build-${CTEST_BUILD_NAME}-${CTEST_BUILD_GROUP}") + +## -- Add the CTest script directory to the module path +set (CTEST_EXTRA_SCRIPT_PATH "${CTEST_SOURCE_DIRECTORY}/ctest") +list (APPEND CMAKE_MODULE_PATH ${CTEST_EXTRA_SCRIPT_PATH}) + +# ----------------------------------------------------------- +# -- Store Build-Specific Info (environment variables) +# ----------------------------------------------------------- + +set (ENV{PIO_DASHBOARD_SITE} ${CTEST_SITE}) +set (ENV{PIO_DASHBOARD_BUILD_NAME} ${CTEST_BUILD_NAME}) +set (ENV{PIO_DASHBOARD_SOURCE_DIR} ${CTEST_SOURCE_DIRECTORY}) +set (ENV{PIO_DASHBOARD_BINARY_DIR} ${CTEST_BINARY_DIRECTORY}) + +# ----------------------------------------------------------- +# -- Run CTest +# ----------------------------------------------------------- + +## -- Empty the binary directory +ctest_empty_binary_directory(${CTEST_BINARY_DIRECTORY}) + +## -- Start +message (" -- Hostname_id = ${HOSTNAME_ID}") +message (" -- Start dashboard - ${CTEST_BUILD_NAME} --") +ctest_start("${CTEST_SCRIPT_ARG}") + +## -- Update +message (" -- Update source - ${CTEST_BUILD_NAME} --") +set 
(CTEST_UPDATE_COMMAND "${CTEST_GIT_COMMAND}") +ctest_update () + +## -- Configure +message (" -- Configure build - ${CTEST_BUILD_NAME} -- with options ${CTEST_CONFIGURE_OPTIONS}") +include (CTestEnvironment-${HOSTNAME_ID}) +set (CTEST_CONFIGURE_COMMAND "${CMAKE_COMMAND} ${CTEST_CONFIGURE_OPTIONS} ${CTEST_SOURCE_DIRECTORY}") +ctest_configure () + +## -- BUILD +message (" -- Build - ${CTEST_BUILD_NAME} --") +set (CTEST_BUILD_COMMAND "${MAKE} tests") +ctest_build () + +## -- TEST +message (" -- Test - ${CTEST_BUILD_NAME} --") +execute_process (COMMAND ${CTEST_EXTRA_SCRIPT_PATH}/runctest-${HOSTNAME_ID}.sh + ${CTEST_EXTRA_SCRIPT_PATH} ${CTEST_SCRIPT_ARG} + WORKING_DIRECTORY ${CTEST_BINARY_DIRECTORY}) + +## -- SUBMIT +message (" -- Submit to dashboard - ${CTEST_BUILD_NAME} --") +message ("** -- PIO_DASHBOARD_SITE=$ENV{PIO_DASHBOARD_SITE}") +ctest_submit () + +# ----------------------------------------------------------- +# -- Clear environment +# ----------------------------------------------------------- + +unset (ENV{PIO_DASHBOARD_SITE}) +unset (ENV{PIO_DASHBOARD_BUILD_NAME}) +unset (ENV{PIO_DASHBOARD_SOURCE_DIR}) +unset (ENV{PIO_DASHBOARD_BINARY_DIR}) + +message (" -- Finished - ${CTEST_BUILD_NAME} --") diff --git a/ParallelIO/LICENSE b/ParallelIO/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/ParallelIO/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/ParallelIO/Makefile.am b/ParallelIO/Makefile.am
new file mode 100644
index 0000000000..41d1265c1f
--- /dev/null
+++ b/ParallelIO/Makefile.am
@@ -0,0 +1,18 @@
+# This is part of PIO. It creates the main Makefile.
+
+# Ed Hartnett
+
+# Look in the m4 directory for autotools stuff.
+ACLOCAL_AMFLAGS= -I m4
+
+# Does the user want to build doxygen documentation?
+if BUILD_DOCS
+DOC = doc
+endif
+
+# Build in each of these subdirs.
+SUBDIRS = src tests examples ${DOC} scripts cmake
+
+# Add these files to the distribution.
+EXTRA_DIST = CMakeLists.txt LICENSE cmake_config.h.in \
+libpio.settings.in
diff --git a/ParallelIO/README.md b/ParallelIO/README.md
new file mode 100644
index 0000000000..3ecaaa6a05
--- /dev/null
+++ b/ParallelIO/README.md
@@ -0,0 +1,117 @@
+# ParallelIO
+
+The Parallel IO libraries (PIO) are high-level parallel I/O C and
+Fortran libraries for applications that need to do netCDF I/O from
+large numbers of processors on an HPC system.
+
+PIO provides a netCDF-like API and allows users to designate some
+subset of processors to perform I/O. Computational code calls
+netCDF-like functions to read and write data, and PIO uses the I/O
+processors to perform all necessary I/O.
+
+## Intracomm Mode
+
+In Intracomm mode, PIO allows the user to designate some subset of
+processors to do all I/O. The I/O processors also participate in
+computational work.
+
+![I/O on Many Processors with Intracomm
+ Mode](./doc/images/I_O_on_Many_Intracomm.png)
+
+## Async Mode
+
+PIO also supports the creation of multiple computation components,
+each containing many processors, and one shared set of I/O
+processors. The computational components can perform write operations
+asynchronously, and the I/O processors will take care of all storage
+interaction.
+
+![I/O on Many Processors with Async
+ Mode](./doc/images/I_O_on_Many_Async.png)
+
+## Website
+
+For complete documentation, see our website at
+[http://ncar.github.io/ParallelIO/](http://ncar.github.io/ParallelIO/).
+
+## Mailing List
+
+The (low-traffic) PIO mailing list is at
+https://groups.google.com/forum/#!forum/parallelio; send email to the
+list at parallelio@googlegroups.com.
+
+## Testing
+
+The results of our continuous integration testing with GitHub Actions
+can be found on any of the Pull Requests on the GitHub site:
+https://github.com/NCAR/ParallelIO.
+
+The results of our nightly tests on multiple platforms can be found on
+our CDash site at
+[http://my.cdash.org/index.php?project=PIO](http://my.cdash.org/index.php?project=PIO).
+
+## Dependencies
+
+PIO can use NetCDF (version 4.6.1+) and/or PnetCDF (version 1.9.0+)
+for I/O. NetCDF may be built with or without netCDF-4 features. NetCDF
+is required for PIO; PnetCDF is optional.
+
+The NetCDF C library must be built with MPI, which requires that it be
+linked with an MPI-enabled version of HDF5. Optionally, NetCDF can be
+built with DAP support, which introduces a dependency on CURL. HDF5
+itself introduces dependencies on LIBZ and (optionally) SZIP.
+
+## Building PIO
+
+To build PIO, unpack the distribution tarball and do:
+
+```
+CC=mpicc FC=mpif90 ./configure --enable-fortran && make check install
+```
+
+For a full description of the available options and flags, try:
+```
+./configure --help
+```
+
+Note that the environment variables CC and FC may need to be set to
+the MPI versions of the C and Fortran compilers. Also, CPPFLAGS and
+LDFLAGS may need to be set to indicate the locations of one or more of
+the dependent libraries. (If using MPI compilers, the entire set of
+dependent libraries should be built with the same compilers.) For
+example:
+
+```
+export CC=mpicc
+export FC=mpifort
+export CPPFLAGS='-I/usr/local/netcdf-fortran-4.4.5_c_4.6.3_mpich-3.2/include -I/usr/local/netcdf-c-4.6.3_hdf5-1.10.5/include -I/usr/local/pnetcdf-1.11.0_shared/include'
+export LDFLAGS='-L/usr/local/netcdf-c-4.6.3_hdf5-1.10.5/lib -L/usr/local/pnetcdf-1.11.0_shared/lib'
+./configure --prefix=/usr/local/pio-2.4.2 --enable-fortran
+make check
+make install
+```
+
+## Building with CMake
+
+The typical configuration with CMake can be done as follows:
+
+```
+CC=mpicc FC=mpif90 cmake [-DOPTION1=value1 -DOPTION2=value2 ...] /path/to/pio/source
+```
+
+Full instructions for the CMake build can be found in the installation
+documentation.
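+
+For example, here is a minimal sketch of a full CMake build. (The
+install prefix and library locations are illustrative; adjust them
+for your system.)
+
+```
+export CC=mpicc
+export FC=mpifort
+mkdir build && cd build
+cmake -DNetCDF_PATH=/usr/local/netcdf-c-4.6.3_hdf5-1.10.5 \
+      -DPnetCDF_PATH=/usr/local/pnetcdf-1.11.0_shared \
+      -DCMAKE_INSTALL_PREFIX=/usr/local/pio-2.4.2 \
+      /path/to/pio/source
+make
+make tests
+ctest
+make install
+```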
+
+# References
+
+Hartnett, E., Edwards, J., "THE PARALLELIO (PIO) C/FORTRAN LIBRARIES
+FOR SCALABLE HPC PERFORMANCE", 37th Conference on Environmental
+Information Processing Technologies, American Meteorological Society
+Annual Meeting, January 2021. Retrieved on Feb 3, 2021, from
+<https://www.researchgate.net/publication/348169990_THE_PARALLELIO_PIO_CFORTRAN_LIBRARIES_FOR_SCALABLE_HPC_PERFORMANCE>.
+
+Hartnett, E., Edwards, J., "POSTER: THE PARALLELIO (PIO) C/FORTRAN LIBRARIES
+FOR SCALABLE HPC PERFORMANCE", 37th Conference on Environmental
+Information Processing Technologies, American Meteorological Society
+Annual Meeting, January 2021. Retrieved on Feb 3, 2021, from
+<https://www.researchgate.net/publication/348170136_THE_PARALLELIO_PIO_CFORTRAN_LIBRARIES_FOR_SCALABLE_HPC_PERFORMANCE>.
diff --git a/ParallelIO/cmake/FindGPTL.cmake b/ParallelIO/cmake/FindGPTL.cmake
new file mode 100644
index 0000000000..c223c1b346
--- /dev/null
+++ b/ParallelIO/cmake/FindGPTL.cmake
@@ -0,0 +1,72 @@
+# - Try to find GPTL
+#
+# This can be controlled by setting the GPTL_DIR (or, equivalently, the
+# GPTL environment variable), or GPTL_<lang>_DIR CMake variables, where
+# <lang> is the COMPONENT language one needs.
+#
+# Once done, this will define:
+#
+#   GPTL_<lang>_FOUND        (BOOL) - system has GPTL
+#   GPTL_<lang>_IS_SHARED    (BOOL) - whether library is shared/dynamic
+#   GPTL_<lang>_INCLUDE_DIR  (PATH) - Location of the C header file
+#   GPTL_<lang>_INCLUDE_DIRS (LIST) - the GPTL include directories
+#   GPTL_<lang>_LIBRARY      (FILE) - Path to the C library file
+#   GPTL_<lang>_LIBRARIES    (LIST) - link these to use GPTL
+#
+# The available COMPONENTS are: C Fortran Fortran_Perf
+# If no components are specified, it assumes only C
+include (LibFind)
+
+# Define GPTL C Component
+define_package_component (GPTL DEFAULT
+  COMPONENT C
+  INCLUDE_NAMES gptl.h
+  LIBRARY_NAMES gptl)
+
+# Define GPTL Fortran Component
+define_package_component (GPTL
+  COMPONENT Fortran
+  INCLUDE_NAMES gptl.mod
+  LIBRARY_NAMES gptl)
+
+# Define GPTL Fortran_Perf Component
+define_package_component (GPTL
+  COMPONENT Fortran_Perf
+  INCLUDE_NAMES perf_mod.mod
+  LIBRARY_NAMES gptl)
+
+# Search for list of valid components requested
+find_valid_components (GPTL)
+
+#==============================================================================
+# SEARCH FOR VALIDATED COMPONENTS
+foreach (GPTL_comp IN LISTS GPTL_FIND_VALID_COMPONENTS)
+
+  # If not found already, search...
+  if (NOT GPTL_${GPTL_comp}_FOUND)
+
+    # Manually add the MPI include and library dirs to search paths
+    if (GPTL_comp STREQUAL C AND MPI_C_FOUND)
+      set (mpiincs ${MPI_C_INCLUDE_PATH})
+      set (mpilibs ${MPI_C_LIBRARIES})
+      set (mpifound ${MPI_C_FOUND})
+    elseif (MPI_Fortran_FOUND)
+      set (mpiincs ${MPI_Fortran_INCLUDE_PATH})
+      set (mpilibs ${MPI_Fortran_LIBRARIES})
+      set (mpifound ${MPI_Fortran_FOUND})
+    endif ()
+
+    # Search for the package component
+    if (mpifound)
+      initialize_paths (GPTL_${GPTL_comp}_PATHS
+        INCLUDE_DIRECTORIES ${mpiincs}
+        LIBRARIES ${mpilibs})
+      find_package_component(GPTL COMPONENT ${GPTL_comp}
+        PATHS ${GPTL_${GPTL_comp}_PATHS})
+    else ()
+      find_package_component(GPTL COMPONENT ${GPTL_comp})
+    endif ()
+
+  endif ()
+
+endforeach ()
diff --git a/ParallelIO/cmake/FindHDF5.cmake b/ParallelIO/cmake/FindHDF5.cmake
new file mode 100644
index 0000000000..e918277b1a
--- /dev/null
+++ b/ParallelIO/cmake/FindHDF5.cmake
@@ -0,0 +1,118 @@
+# - Try to find HDF5
+#
+# This can be controlled by setting the HDF5_DIR (or, equivalently, the
+# HDF5 environment variable), or HDF5_<lang>_DIR CMake variables, where
+# <lang> is the COMPONENT language one needs.
+#
+# Once done, this will define:
+#
+#   HDF5_<lang>_FOUND        (BOOL) - system has HDF5
+#   HDF5_<lang>_IS_SHARED    (BOOL) - whether library is shared/dynamic
+#   HDF5_<lang>_INCLUDE_DIR  (PATH) - Location of the C header file
+#   HDF5_<lang>_INCLUDE_DIRS (LIST) - the HDF5 include directories
+#   HDF5_<lang>_LIBRARY      (FILE) - Path to the C library file
+#   HDF5_<lang>_LIBRARIES    (LIST) - link these to use HDF5
+#
+# The available COMPONENTS are: C HL Fortran Fortran_HL
+# If no components are specified, it assumes only C
+include (LibFind)
+
+# Define HDF5 C Component
+define_package_component (HDF5 DEFAULT
+  COMPONENT C
+  INCLUDE_NAMES hdf5.h
+  LIBRARY_NAMES hdf5)
+
+# Define HDF5 HL Component
+define_package_component (HDF5
+  COMPONENT HL
+  INCLUDE_NAMES hdf5_hl.h
+  LIBRARY_NAMES hdf5_hl)
+
+# Define HDF5 Fortran Component
+define_package_component (HDF5
+  COMPONENT Fortran
+  INCLUDE_NAMES hdf5.mod
+  LIBRARY_NAMES hdf5_fortran)
+
+# Define HDF5 Fortran_HL Component
+define_package_component (HDF5
+  COMPONENT Fortran_HL
+  INCLUDE_NAMES hdf5.mod
+  LIBRARY_NAMES hdf5hl_fortran)
+
+# Search for list of valid components requested
+find_valid_components (HDF5)
+
+#==============================================================================
+# SEARCH FOR VALIDATED COMPONENTS
+foreach (HDF5_comp IN LISTS HDF5_FIND_VALID_COMPONENTS)
+
+  # If not found already, search...
+ if (NOT HDF5_${HDF5_comp}_FOUND) + + # Manually add the MPI include and library dirs to search paths + if ( (HDF5_comp STREQUAL C OR HDF5_comp STREQUAL HL) AND MPI_C_FOUND) + set (mpiincs ${MPI_C_INCLUDE_PATH}) + set (mpilibs ${MPI_C_LIBRARIES}) + set (mpifound ${MPI_C_FOUND}) + elseif (MPI_Fortran_FOUND) + set (mpiincs ${MPI_Fortran_INCLUDE_PATH}) + set (mpilibs ${MPI_Fortran_LIBRARIES}) + set (mpifound ${MPI_Fortran_FOUND}) + endif () + + # Search for the package component + if (mpifound) + initialize_paths (HDF5_${HDF5_comp}_PATHS + INCLUDE_DIRECTORIES ${mpiincs} + LIBRARIES ${mpilibs}) + find_package_component(HDF5 COMPONENT ${HDF5_comp} + PATHS ${HDF5_${HDF5_comp}_PATHS}) + else () + find_package_component(HDF5 COMPONENT ${HDF5_comp}) + endif () + + # Continue only if found + if (HDF5_${HDF5_comp}_FOUND) + + # Dependencies + if (HDF5_comp STREQUAL C AND NOT HDF5_C_IS_SHARED) + + # DEPENDENCY: LIBZ + find_package (LIBZ) + if (LIBZ_FOUND) + list (APPEND HDF5_C_INCLUDE_DIRS ${LIBZ_INCLUDE_DIRS}) + list (APPEND HDF5_C_LIBRARIES ${LIBZ_LIBRARIES}) + endif () + + # DEPENDENCY: SZIP (Optional) + check_macro (HDF5_C_HAS_SZIP + NAME TryHDF5_HAS_SZIP.c + HINTS ${CMAKE_MODULE_PATH} + DEFINITIONS -I${HDF5_C_INCLUDE_DIRS} + COMMENT "whether HDF5 has SZIP support") + if (HDF5_C_HAS_SZIP) + find_package (SZIP) + if (SZIP_FOUND) + list (APPEND HDF5_C_INCLUDE_DIRS ${SZIP_INCLUDE_DIRS}) + list (APPEND HDF5_C_LIBRARIES ${SZIP_LIBRARIES}) + endif () + endif () + + elseif (NOT HDF5_${HDF5_comp}_IS_SHARED) + + # DEPENDENCY: HDF5 + find_package (HDF5 COMPONENTS C) + if (HDF5_C_FOUND) + list (APPEND HDF5_${HDF5_comp}_INCLUDE_DIRS ${HDF5_C_INCLUDE_DIRS}) + list (APPEND HDF5_${HDF5_comp}_LIBRARIES ${HDF5_C_LIBRARIES}) + endif () + + endif () + + endif () + + endif () + +endforeach () diff --git a/ParallelIO/cmake/FindLIBRT.cmake b/ParallelIO/cmake/FindLIBRT.cmake new file mode 100644 index 0000000000..1f55f9f3f1 --- /dev/null +++ b/ParallelIO/cmake/FindLIBRT.cmake @@ -0,0 +1,28 @@ +# - Try to find LIBRT +# +# This can be controlled by setting the LIBRT_DIR (or, equivalently, the +# LIBRT environment variable). +# +# Once done, this will define: +# +# LIBRT_FOUND (BOOL) - system has LIBRT +# LIBRT_IS_SHARED (BOOL) - whether library is shared/dynamic +# LIBRT_INCLUDE_DIR (PATH) - Location of the C header file +# LIBRT_INCLUDE_DIRS (LIST) - the LIBRT include directories +# LIBRT_LIBRARY (FILE) - Path to the C library file +# LIBRT_LIBRARIES (LIST) - link these to use LIBRT +# +include (LibFind) + +# Define LIBRT package +define_package_component (LIBRT + INCLUDE_NAMES time.h + LIBRARY_NAMES rt) + +# SEARCH FOR PACKAGE +if (NOT LIBRT_FOUND) + + # Search for the package + find_package_component(LIBRT) + +endif () diff --git a/ParallelIO/cmake/FindLIBZ.cmake b/ParallelIO/cmake/FindLIBZ.cmake new file mode 100644 index 0000000000..8ebbaefeed --- /dev/null +++ b/ParallelIO/cmake/FindLIBZ.cmake @@ -0,0 +1,37 @@ +# - Try to find LIBZ +# +# This can be controlled by setting the LIBZ_DIR (or, equivalently, the +# LIBZ environment variable). 
+#
+# Once done, this will define:
+#
+#   LIBZ_FOUND        (BOOL) - system has LIBZ
+#   LIBZ_IS_SHARED    (BOOL) - whether library is shared/dynamic
+#   LIBZ_INCLUDE_DIR  (PATH) - Location of the C header file
+#   LIBZ_INCLUDE_DIRS (LIST) - the LIBZ include directories
+#   LIBZ_LIBRARY      (FILE) - Path to the C library file
+#   LIBZ_LIBRARIES    (LIST) - link these to use LIBZ
+#
+include (LibFind)
+
+# Define LIBZ package
+define_package_component (LIBZ
+  INCLUDE_NAMES zlib.h
+  LIBRARY_NAMES z)
+
+# SEARCH FOR PACKAGE
+if (NOT LIBZ_FOUND)
+
+  # Manually add the MPI include and library dirs to search paths
+  # and search for the package component
+  if (MPI_C_FOUND)
+    initialize_paths (LIBZ_PATHS
+      INCLUDE_DIRECTORIES ${MPI_C_INCLUDE_PATH}
+      LIBRARIES ${MPI_C_LIBRARIES})
+    find_package_component(LIBZ
+      PATHS ${LIBZ_PATHS})
+  else ()
+    find_package_component(LIBZ)
+  endif ()
+
+endif ()
diff --git a/ParallelIO/cmake/FindMPE.cmake b/ParallelIO/cmake/FindMPE.cmake
new file mode 100644
index 0000000000..5a964172da
--- /dev/null
+++ b/ParallelIO/cmake/FindMPE.cmake
@@ -0,0 +1,50 @@
+# - Try to find MPE
+#
+# This can be controlled by setting the MPE_PATH (or, equivalently,
+# the MPE environment variable), or MPE_<lang>_PATH CMake variables,
+# where <lang> is the COMPONENT language one needs.
+#
+# Once done, this will define:
+#
+#   MPE_<lang>_FOUND        (BOOL) - system has MPE
+#   MPE_<lang>_IS_SHARED    (BOOL) - whether library is shared/dynamic
+#   MPE_<lang>_INCLUDE_DIR  (PATH) - Location of the C header file
+#   MPE_<lang>_INCLUDE_DIRS (LIST) - the MPE include directories
+#   MPE_<lang>_LIBRARY      (FILE) - Path to the C library file
+#   MPE_<lang>_LIBRARIES    (LIST) - link these to use MPE
+#
+# The available COMPONENTS are: C
include (LibFind)
+include (LibCheck)
+
+# Define MPE C Component
+define_package_component (MPE DEFAULT
+  COMPONENT C
+  INCLUDE_NAMES mpe.h
+  LIBRARY_NAMES mpe)
+
+# Search for list of valid components requested
+find_valid_components (MPE)
+
+#==============================================================================
+# SEARCH FOR VALIDATED COMPONENTS
+foreach (NCDFcomp IN LISTS MPE_FIND_VALID_COMPONENTS)
+
+  # If not found already, search...
+  if (NOT MPE_${NCDFcomp}_FOUND)
+
+    # Manually add the MPI include and library dirs to search paths
+    # and search for the package component
+    if (MPI_${NCDFcomp}_FOUND)
+      initialize_paths (MPE_${NCDFcomp}_PATHS
+        INCLUDE_DIRECTORIES ${MPI_${NCDFcomp}_INCLUDE_PATH}
+        LIBRARIES ${MPI_${NCDFcomp}_LIBRARIES})
+      find_package_component(MPE COMPONENT ${NCDFcomp}
+        PATHS ${MPE_${NCDFcomp}_PATHS})
+    else ()
+      find_package_component(MPE COMPONENT ${NCDFcomp})
+    endif ()
+
+  endif ()
+
+endforeach ()
diff --git a/ParallelIO/cmake/FindMPISERIAL.cmake b/ParallelIO/cmake/FindMPISERIAL.cmake
new file mode 100644
index 0000000000..09906eb7a2
--- /dev/null
+++ b/ParallelIO/cmake/FindMPISERIAL.cmake
@@ -0,0 +1,44 @@
+# - Try to find MPISERIAL
+#
+# This can be controlled by setting the MPISERIAL_PATH (or, equivalently, the
+# MPISERIAL environment variable).
+#
+# Once done, this will define:
+#
+#   MPISERIAL_FOUND        (BOOL) - system has MPISERIAL
+#   MPISERIAL_IS_SHARED    (BOOL) - whether library is shared/dynamic
+#   MPISERIAL_INCLUDE_DIR  (PATH) - Location of the C header file
+#   MPISERIAL_INCLUDE_DIRS (LIST) - the MPISERIAL include directories
+#   MPISERIAL_LIBRARY      (FILE) - Path to the C library file
+#   MPISERIAL_LIBRARIES    (LIST) - link these to use MPISERIAL
+#
+include (LibFind)
+
+# Define MPISERIAL C component
+define_package_component (MPISERIAL DEFAULT
+  COMPONENT C
+  INCLUDE_NAMES mpi.h
+  LIBRARY_NAMES mpi-serial)
+
+# Define MPISERIAL Fortran component
+define_package_component (MPISERIAL
+  COMPONENT Fortran
+  INCLUDE_NAMES mpi.mod mpif.h
+  LIBRARY_NAMES mpi-serial)
+
+# Search for list of valid components requested
+find_valid_components (MPISERIAL)
+
+#==============================================================================
+# SEARCH FOR VALIDATED COMPONENTS
+foreach (MPISERIAL_comp IN LISTS MPISERIAL_FIND_VALID_COMPONENTS)
+
+  # If not found already, search...
+  if (NOT MPISERIAL_${MPISERIAL_comp}_FOUND)
+
+    # Search for the package
+    find_package_component(MPISERIAL COMPONENT ${MPISERIAL_comp})
+
+  endif ()
+
+endforeach ()
diff --git a/ParallelIO/cmake/FindNetCDF.cmake b/ParallelIO/cmake/FindNetCDF.cmake
new file mode 100644
index 0000000000..344714b18a
--- /dev/null
+++ b/ParallelIO/cmake/FindNetCDF.cmake
@@ -0,0 +1,143 @@
+# - Try to find NetCDF
+#
+# This can be controlled by setting the NetCDF_PATH (or, equivalently, the
+# NETCDF environment variable), or NetCDF_<lang>_PATH CMake variables, where
+# <lang> is the COMPONENT language one needs.
+#
+# Once done, this will define:
+#
+#   NetCDF_<lang>_FOUND        (BOOL) - system has NetCDF
+#   NetCDF_<lang>_IS_SHARED    (BOOL) - whether library is shared/dynamic
+#   NetCDF_<lang>_INCLUDE_DIR  (PATH) - Location of the C header file
+#   NetCDF_<lang>_INCLUDE_DIRS (LIST) - the NetCDF include directories
+#   NetCDF_<lang>_LIBRARY      (FILE) - Path to the C library file
+#   NetCDF_<lang>_LIBRARIES    (LIST) - link these to use NetCDF
+#
+# The available COMPONENTS are: C Fortran
+# If no components are specified, it assumes only C
+include (LibFind)
+include (LibCheck)
+
+# Define NetCDF C Component
+define_package_component (NetCDF DEFAULT
+  COMPONENT C
+  INCLUDE_NAMES netcdf.h
+  LIBRARY_NAMES netcdf)
+
+# Define NetCDF Fortran Component
+define_package_component (NetCDF
+  COMPONENT Fortran
+  INCLUDE_NAMES netcdf.mod netcdf.inc
+  LIBRARY_NAMES netcdff)
+
+# Search for list of valid components requested
+find_valid_components (NetCDF)
+
+#==============================================================================
+# SEARCH FOR VALIDATED COMPONENTS
+foreach (NCDFcomp IN LISTS NetCDF_FIND_VALID_COMPONENTS)
+
+  # If not found already, search...
+  if (NOT NetCDF_${NCDFcomp}_FOUND)
+
+    # Manually add the MPI include and library dirs to search paths
+    # and search for the package component
+    if (MPI_${NCDFcomp}_FOUND)
+      initialize_paths (NetCDF_${NCDFcomp}_PATHS
+        INCLUDE_DIRECTORIES ${MPI_${NCDFcomp}_INCLUDE_PATH}
+        LIBRARIES ${MPI_${NCDFcomp}_LIBRARIES})
+      find_package_component(NetCDF COMPONENT ${NCDFcomp}
+        PATHS ${NetCDF_${NCDFcomp}_PATHS})
+    else ()
+      find_package_component(NetCDF COMPONENT ${NCDFcomp})
+    endif ()
+
+    # Continue only if component found
+    if (NetCDF_${NCDFcomp}_FOUND)
+
+      # Checks
+      if (NCDFcomp STREQUAL C)
+
+        # Check version
+        check_version (NetCDF
+          NAME "netcdf_meta.h"
+          HINTS ${NetCDF_C_INCLUDE_DIRS}
+          MACRO_REGEX "NC_VERSION_")
+
+        # Check for parallel support
+        check_macro (NetCDF_C_HAS_PARALLEL
+          NAME TryNetCDF_PARALLEL.c
+          HINTS ${CMAKE_MODULE_PATH}
+          DEFINITIONS -I${NetCDF_C_INCLUDE_DIR}
+          COMMENT "whether NetCDF has parallel support")
+
+        # Check if logging enabled
+        set(CMAKE_REQUIRED_INCLUDES ${NetCDF_C_INCLUDE_DIR})
+        set(CMAKE_REQUIRED_LIBRARIES ${NetCDF_C_LIBRARIES})
+        CHECK_FUNCTION_EXISTS(nc_set_log_level NetCDF_C_LOGGING_ENABLED)
+
+      endif ()
+
+      # Dependencies
+      if (NCDFcomp STREQUAL C AND NOT NetCDF_C_IS_SHARED)
+
+        # DEPENDENCY: PnetCDF (if PnetCDF enabled)
+        check_macro (NetCDF_C_HAS_PNETCDF
+          NAME TryNetCDF_PNETCDF.c
+          HINTS ${CMAKE_MODULE_PATH}
+          DEFINITIONS -I${NetCDF_C_INCLUDE_DIR}
+          COMMENT "whether NetCDF has PnetCDF support")
+        if (NetCDF_C_HAS_PNETCDF)
+          find_package (PnetCDF COMPONENTS C)
+          if (PnetCDF_C_FOUND)
+            list (APPEND NetCDF_C_INCLUDE_DIRS ${PnetCDF_C_INCLUDE_DIRS})
+            list (APPEND NetCDF_C_LIBRARIES ${PnetCDF_C_LIBRARIES})
+          endif ()
+        endif ()
+
+        # DEPENDENCY: CURL (If DAP enabled)
+        check_macro (NetCDF_C_HAS_DAP
+          NAME TryNetCDF_DAP.c
+          HINTS ${CMAKE_MODULE_PATH}
+          DEFINITIONS -I${NetCDF_C_INCLUDE_DIR}
+          COMMENT "whether NetCDF has DAP support")
+        if (NetCDF_C_HAS_DAP)
+          find_package (CURL)
+          if (CURL_FOUND)
+            list (APPEND NetCDF_C_INCLUDE_DIRS ${CURL_INCLUDE_DIRS})
+            list (APPEND NetCDF_C_LIBRARIES ${CURL_LIBRARIES})
+          endif ()
+        endif ()
+
+        # DEPENDENCY: HDF5
+        find_package (HDF5 COMPONENTS HL C)
+        if (HDF5_C_FOUND)
+          list (APPEND NetCDF_C_INCLUDE_DIRS ${HDF5_C_INCLUDE_DIRS}
+            ${HDF5_HL_INCLUDE_DIRS})
+          list (APPEND NetCDF_C_LIBRARIES ${HDF5_C_LIBRARIES}
+            ${HDF5_HL_LIBRARIES})
+        endif ()
+
+        # DEPENDENCY: LIBDL and the math library
+        list (APPEND NetCDF_C_LIBRARIES -ldl -lm)
+
+      elseif (NCDFcomp STREQUAL Fortran AND NOT NetCDF_Fortran_IS_SHARED)
+
+        # DEPENDENCY: NetCDF
+        set (orig_comp ${NCDFcomp})
+        set (orig_comps ${NetCDF_FIND_VALID_COMPONENTS})
+        find_package (NetCDF COMPONENTS C)
+        set (NetCDF_FIND_VALID_COMPONENTS ${orig_comps})
+        set (NCDFcomp ${orig_comp})
+        if (NetCDF_C_FOUND)
+          list (APPEND NetCDF_Fortran_INCLUDE_DIRS ${NetCDF_C_INCLUDE_DIRS})
+          list (APPEND NetCDF_Fortran_LIBRARIES ${NetCDF_C_LIBRARIES})
+        endif ()
+
+      endif ()
+
+    endif ()
+
+  endif ()
+
+endforeach ()
diff --git a/ParallelIO/cmake/FindPAPI.cmake b/ParallelIO/cmake/FindPAPI.cmake
new file mode 100644
index 0000000000..dcf1445bc7
--- /dev/null
+++ b/ParallelIO/cmake/FindPAPI.cmake
@@ -0,0 +1,28 @@
+# - Try to find PAPI
+#
+# This can be controlled by setting the PAPI_DIR (or, equivalently, the
+# PAPI environment variable).
+#
+# Once done, this will define:
+#
+#   PAPI_FOUND        (BOOL) - system has PAPI
+#   PAPI_IS_SHARED    (BOOL) - whether library is shared/dynamic
+#   PAPI_INCLUDE_DIR  (PATH) - Location of the C header file
+#   PAPI_INCLUDE_DIRS (LIST) - the PAPI include directories
+#   PAPI_LIBRARY      (FILE) - Path to the C library file
+#   PAPI_LIBRARIES    (LIST) - link these to use PAPI
+#
+include (LibFind)
+
+# Define PAPI package
+define_package_component (PAPI
+  INCLUDE_NAMES papi.h
+  LIBRARY_NAMES papi)
+
+# SEARCH FOR PACKAGE
+if (NOT PAPI_FOUND)
+
+  # Search for the package
+  find_package_component(PAPI)
+
+endif ()
diff --git a/ParallelIO/cmake/FindPnetCDF.cmake b/ParallelIO/cmake/FindPnetCDF.cmake
new file mode 100644
index 0000000000..b87d245cd1
--- /dev/null
+++ b/ParallelIO/cmake/FindPnetCDF.cmake
@@ -0,0 +1,68 @@
+# - Try to find PnetCDF
+#
+# This can be controlled by setting the PnetCDF_PATH (or, equivalently, the
+# PNETCDF environment variable), or PnetCDF_<lang>_PATH CMake variables, where
+# <lang> is the COMPONENT language one needs.
+#
+# Once done, this will define:
+#
+#   PnetCDF_<lang>_FOUND        (BOOL) - system has PnetCDF
+#   PnetCDF_<lang>_IS_SHARED    (BOOL) - whether library is shared/dynamic
+#   PnetCDF_<lang>_INCLUDE_DIR  (PATH) - Location of the C header file
+#   PnetCDF_<lang>_INCLUDE_DIRS (LIST) - the PnetCDF include directories
+#   PnetCDF_<lang>_LIBRARY      (FILE) - Path to the C library file
+#   PnetCDF_<lang>_LIBRARIES    (LIST) - link these to use PnetCDF
+#
+# The available COMPONENTS are: C, Fortran
+# If no components are specified, it assumes only C
+include (LibFind)
+include (LibCheck)
+
+# Define PnetCDF C Component
+define_package_component (PnetCDF DEFAULT
+  COMPONENT C
+  INCLUDE_NAMES pnetcdf.h
+  LIBRARY_NAMES pnetcdf)
+
+# Define PnetCDF Fortran Component
+define_package_component (PnetCDF
+  COMPONENT Fortran
+  INCLUDE_NAMES pnetcdf.mod pnetcdf.inc
+  LIBRARY_NAMES pnetcdf)
+
+# Search for list of valid components requested
+find_valid_components (PnetCDF)
+
+#==============================================================================
+# SEARCH FOR VALIDATED COMPONENTS
+foreach (PNCDFcomp IN LISTS PnetCDF_FIND_VALID_COMPONENTS)
+
+  # If not found already, search...
+  if (NOT PnetCDF_${PNCDFcomp}_FOUND)
+
+    # Manually add the MPI include and library dirs to search paths
+    # and search for the package component
+    if (MPI_${PNCDFcomp}_FOUND)
+      initialize_paths (PnetCDF_${PNCDFcomp}_PATHS
+        INCLUDE_DIRECTORIES ${MPI_${PNCDFcomp}_INCLUDE_PATH}
+        LIBRARIES ${MPI_${PNCDFcomp}_LIBRARIES})
+      find_package_component(PnetCDF COMPONENT ${PNCDFcomp}
+        PATHS ${PnetCDF_${PNCDFcomp}_PATHS})
+    else ()
+      find_package_component(PnetCDF COMPONENT ${PNCDFcomp})
+    endif ()
+
+    # Continue only if component found
+    if (PnetCDF_${PNCDFcomp}_FOUND)
+
+      # Check version
+      check_version (PnetCDF
+        NAME "pnetcdf.h"
+        HINTS ${PnetCDF_${PNCDFcomp}_INCLUDE_DIR}
+        MACRO_REGEX "PNETCDF_VERSION_")
+
+    endif ()
+
+  endif ()
+
+endforeach ()
diff --git a/ParallelIO/cmake/FindSZIP.cmake b/ParallelIO/cmake/FindSZIP.cmake
new file mode 100644
index 0000000000..e65cfe5fd6
--- /dev/null
+++ b/ParallelIO/cmake/FindSZIP.cmake
@@ -0,0 +1,37 @@
+# - Try to find SZIP
+#
+# This can be controlled by setting the SZIP_DIR (or, equivalently, the
+# SZIP environment variable).
+#
+# Once done, this will define:
+#
+#   SZIP_FOUND        (BOOL) - system has SZIP
+#   SZIP_IS_SHARED    (BOOL) - whether library is shared/dynamic
+#   SZIP_INCLUDE_DIR  (PATH) - Location of the C header file
+#   SZIP_INCLUDE_DIRS (LIST) - the SZIP include directories
+#   SZIP_LIBRARY      (FILE) - Path to the C library file
+#   SZIP_LIBRARIES    (LIST) - link these to use SZIP
+#
+include (LibFind)
+
+# Define SZIP package
+define_package_component (SZIP
+  INCLUDE_NAMES szlib.h
+  LIBRARY_NAMES sz)
+
+# SEARCH FOR PACKAGE
+if (NOT SZIP_FOUND)
+
+  # Manually add the MPI include and library dirs to search paths
+  # and search for the package component
+  if (MPI_C_FOUND)
+    initialize_paths (SZIP_PATHS
+      INCLUDE_DIRECTORIES ${MPI_C_INCLUDE_PATH}
+      LIBRARIES ${MPI_C_LIBRARIES})
+    find_package_component(SZIP
+      PATHS ${SZIP_PATHS})
+  else ()
+    find_package_component(SZIP)
+  endif ()
+
+endif ()
diff --git a/ParallelIO/cmake/LibCheck.cmake b/ParallelIO/cmake/LibCheck.cmake
new file mode 100644
index 0000000000..3f12bdf796
--- /dev/null
+++ b/ParallelIO/cmake/LibCheck.cmake
@@ -0,0 +1,104 @@
+include (CMakeParseArguments)
+include (CheckFunctionExists)
+#==============================================================================
+#
+# FUNCTIONS TO HELP WITH Check* MODULES
+#
+#==============================================================================
+
+#______________________________________________________________________________
+# - Basic function to check a property of a package using a try_compile step
+#
+# SYNTAX:  check_macro (<variable>
+#                       NAME <try_compile file name>
+#                       HINTS <path to try_compile file> ...
+#                       DEFINITIONS <compile definitions> ...
+#                       COMMENT <checking comment string>)
+#
+function (check_macro VARIABLE)
+
+  # Parse the input arguments
+  set (oneValueArgs COMMENT NAME)
+  set (multiValueArgs HINTS DEFINITIONS)
+  cmake_parse_arguments (${VARIABLE} "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+
+  # If the return variable is already defined, don't continue
+  if (NOT DEFINED ${VARIABLE})
+
+    message (STATUS "Checking ${${VARIABLE}_COMMENT}")
+    find_file (${VARIABLE}_TRY_FILE
+      NAMES ${${VARIABLE}_NAME}
+      HINTS ${${VARIABLE}_HINTS})
+    if (${VARIABLE}_TRY_FILE)
+      try_compile (COMPILE_RESULT
+        ${CMAKE_CURRENT_BINARY_DIR}/try${VARIABLE}
+        SOURCES ${${VARIABLE}_TRY_FILE}
+        COMPILE_DEFINITIONS ${${VARIABLE}_DEFINITIONS}
+        OUTPUT_VARIABLE TryOUT)
+      if (COMPILE_RESULT)
+        message (STATUS "Checking ${${VARIABLE}_COMMENT} - yes")
+      else ()
+        message (STATUS "Checking ${${VARIABLE}_COMMENT} - no")
+      endif ()
+
+      set (${VARIABLE} ${COMPILE_RESULT}
+        CACHE BOOL "${${VARIABLE}_COMMENT}")
+
+    else ()
+      message (STATUS "Checking ${${VARIABLE}_COMMENT} - failed")
+    endif ()
+
+    unset (${VARIABLE}_TRY_FILE CACHE)
+  endif ()
+
+endfunction ()
+
+#______________________________________________________________________________
+# - Basic function to check the version of a package by parsing its
+#   version header
+#
+# SYNTAX:  check_version (<pkg>
+#                         NAME <header file name>
+#                         HINTS <paths to search> ...
+#                         MACRO_REGEX <regex for version macros>)
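+#
+# Example (mirroring the actual call in FindNetCDF.cmake):
+#
+#   check_version (NetCDF
+#                  NAME "netcdf_meta.h"
+#                  HINTS ${NetCDF_C_INCLUDE_DIRS}
+#                  MACRO_REGEX "NC_VERSION_")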
+#
+function (check_version PKG)
+
+  # Parse the input arguments
+  set (oneValueArgs NAME MACRO_REGEX)
+  set (multiValueArgs HINTS)
+  cmake_parse_arguments (${PKG} "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+
+  # If the return variable is already defined, don't continue
+  if (NOT DEFINED ${PKG}_VERSION)
+
+    message (STATUS "Checking ${PKG} version")
+    find_file (${PKG}_VERSION_HEADER
+      NAMES ${${PKG}_NAME}
+      HINTS ${${PKG}_HINTS})
+    if (${PKG}_VERSION_HEADER)
+      set (def)
+      file (STRINGS ${${PKG}_VERSION_HEADER} deflines
+        REGEX "^#define[ \\t]+${${PKG}_MACRO_REGEX}")
+      foreach (defline IN LISTS deflines)
+        string (REPLACE "\"" "" defline "${defline}")
+        string (REPLACE "." "" defline "${defline}")
+        string (REGEX REPLACE "[ \\t]+" ";" deflist "${defline}")
+        list (GET deflist 2 arg)
+        list (APPEND def ${arg})
+      endforeach ()
+      string (REPLACE ";" "." vers "${def}")
+      message (STATUS "Checking ${PKG} version - ${vers}")
+      set (${PKG}_VERSION ${vers}
+        CACHE STRING "${PKG} version string")
+      if (${PKG}_VERSION VERSION_LESS ${PKG}_FIND_VERSION)
+        message (FATAL_ERROR "${PKG} version insufficient")
+      endif ()
+    else ()
+      message (STATUS "Checking ${PKG} version - failed")
+    endif ()
+
+    unset (${PKG}_VERSION_HEADER CACHE)
+
+  endif ()
+
+endfunction ()
\ No newline at end of file
diff --git a/ParallelIO/cmake/LibFind.cmake b/ParallelIO/cmake/LibFind.cmake
new file mode 100644
index 0000000000..61cd93aa37
--- /dev/null
+++ b/ParallelIO/cmake/LibFind.cmake
@@ -0,0 +1,333 @@
+include (CMakeParseArguments)
+include(FindPackageHandleStandardArgs)
+
+#==============================================================================
+#
+# FUNCTIONS TO HELP WITH Find* MODULES
+#
+#==============================================================================
+
+#______________________________________________________________________________
+# - Wrapper for finding static libraries ONLY
+#
+macro (find_static_library)
+  set (_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
+  set (CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_STATIC_LIBRARY_SUFFIX})
+  find_library(${ARGN})
+  set (CMAKE_FIND_LIBRARY_SUFFIXES ${_CMAKE_FIND_LIBRARY_SUFFIXES})
+  unset (_CMAKE_FIND_LIBRARY_SUFFIXES)
+endmacro ()
+
+
+#______________________________________________________________________________
+# - Wrapper for finding shared/dynamic libraries ONLY
+#
+macro (find_shared_library)
+  set (_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
+  set (CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_SHARED_LIBRARY_SUFFIX})
+  find_library(${ARGN})
+  set (CMAKE_FIND_LIBRARY_SUFFIXES ${_CMAKE_FIND_LIBRARY_SUFFIXES})
+  unset (_CMAKE_FIND_LIBRARY_SUFFIXES)
+endmacro ()
+
+
+#______________________________________________________________________________
+# - Function to determine type (SHARED or STATIC) of library
+#
+#   Input:
+#     LIB (FILE)
+#
+#   Returns:
+#     RETURN_VAR (BOOL)
+#
+function (is_shared_library RETURN_VAR LIB)
+  get_filename_component(libext ${LIB} EXT)
+  if (libext MATCHES ${CMAKE_SHARED_LIBRARY_SUFFIX})
+    set (${RETURN_VAR} TRUE PARENT_SCOPE)
+  else ()
+    set (${RETURN_VAR} FALSE PARENT_SCOPE)
+  endif ()
+endfunction ()
+
+
+#______________________________________________________________________________
+# - Function to define a valid package component
+#
+#   Input:
+#     ${PKG}_DEFAULT        (BOOL)
+#     ${PKG}_COMPONENT      (STRING)
+#     ${PKG}_INCLUDE_NAMES  (LIST)
+#     ${PKG}_LIBRARY_NAMES  (LIST)
+#
+#   Returns:
+#     ${PKG}_DEFAULT_COMPONENT           (STRING)
+#     ${PKG}_VALID_COMPONENTS            (LIST)
+#     ${PKG}_${COMPONENT}_INCLUDE_NAMES  (LIST)
+#     ${PKG}_${COMPONENT}_LIBRARY_NAMES  (LIST)
+#
+function (define_package_component PKG)
+
+  # Parse the input arguments
+  set (options DEFAULT)
+  set (oneValueArgs COMPONENT)
+  set (multiValueArgs INCLUDE_NAMES LIBRARY_NAMES)
+  cmake_parse_arguments (${PKG} "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+  if (${PKG}_COMPONENT)
+    set (PKGCOMP ${PKG}_${${PKG}_COMPONENT})
+  else ()
+    set (PKGCOMP ${PKG})
+  endif ()
+
+  # Set return values
+  if (${PKG}_COMPONENT)
+    if (${PKG}_DEFAULT)
+      set (${PKG}_DEFAULT_COMPONENT ${${PKG}_COMPONENT} PARENT_SCOPE)
+    endif ()
+    set (VALID_COMPONENTS ${${PKG}_VALID_COMPONENTS})
+    list (APPEND VALID_COMPONENTS ${${PKG}_COMPONENT})
+    set (${PKG}_VALID_COMPONENTS ${VALID_COMPONENTS} PARENT_SCOPE)
+  endif ()
+  set (${PKGCOMP}_INCLUDE_NAMES ${${PKG}_INCLUDE_NAMES} PARENT_SCOPE)
+  set (${PKGCOMP}_LIBRARY_NAMES ${${PKG}_LIBRARY_NAMES} PARENT_SCOPE)
+
+endfunction ()
+
+
+#______________________________________________________________________________
+# - Function to find valid package components
+#
+#   Assumes pre-defined variables:
+#     ${PKG}_FIND_COMPONENTS    (LIST)
+#     ${PKG}_DEFAULT_COMPONENT  (STRING)
+#     ${PKG}_VALID_COMPONENTS   (LIST)
+#
+#   Returns:
+#     ${PKG}_FIND_VALID_COMPONENTS  (LIST)
+#
+function (find_valid_components PKG)
+
+  if (NOT ${PKG}_FIND_COMPONENTS)
+    set (${PKG}_FIND_COMPONENTS ${${PKG}_DEFAULT_COMPONENT})
+  endif ()
+
+  set (FIND_VALID_COMPONENTS)
+  foreach (comp IN LISTS ${PKG}_FIND_COMPONENTS)
+    if (";${${PKG}_VALID_COMPONENTS};" MATCHES ";${comp};")
+      list (APPEND FIND_VALID_COMPONENTS ${comp})
+    endif ()
+  endforeach ()
+
+  set (${PKG}_FIND_VALID_COMPONENTS ${FIND_VALID_COMPONENTS} PARENT_SCOPE)
+
+endfunction ()
+
+
+#______________________________________________________________________________
+# - Initialize a list of paths from a list of includes and libraries
+#
+#   Input:
+#     INCLUDE_DIRECTORIES
+#     LIBRARIES
+#
+#   Output:
+#     ${PATHLIST}
+#
+function (initialize_paths PATHLIST)
+
+  # Parse the input arguments
+  set (multiValueArgs INCLUDE_DIRECTORIES LIBRARIES)
+  cmake_parse_arguments (INIT "" "" "${multiValueArgs}" ${ARGN})
+
+  set (paths)
+  foreach (inc IN LISTS INIT_INCLUDE_DIRECTORIES)
+    list (APPEND paths ${inc})
+    get_filename_component (dname ${inc} NAME)
+    if (dname MATCHES "include")
+      get_filename_component (prefx ${inc} PATH)
+      list (APPEND paths ${prefx})
+    endif ()
+  endforeach ()
+  foreach (lib IN LISTS INIT_LIBRARIES)
+    get_filename_component (libdir ${lib} PATH)
+    list (APPEND paths ${libdir})
+    get_filename_component (dname ${libdir} PATH)
+    if (dname MATCHES "lib")
+      get_filename_component (prefx ${libdir} PATH)
+      list (APPEND paths ${prefx})
+    endif ()
+  endforeach ()
+
+  set (${PATHLIST} ${paths} PARENT_SCOPE)
+
+endfunction ()
+
+
+#______________________________________________________________________________
+# - Basic find package macro for a specific component
+#
+#   Assumes pre-defined variables:
+#     ${PKG}_${COMP}_INCLUDE_NAMES or ${PKG}_INCLUDE_NAMES
+#     ${PKG}_${COMP}_LIBRARY_NAMES or ${PKG}_LIBRARY_NAMES
+#
+#   Input:
+#     ${PKG}_COMPONENT
+#     ${PKG}_HINTS
+#     ${PKG}_PATHS
+#
+function (find_package_component PKG)
+
+  # Parse the input arguments
+  set (options)
+  set (oneValueArgs COMPONENT)
+  set (multiValueArgs HINTS PATHS)
+  cmake_parse_arguments (${PKG} "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+  set (COMP ${${PKG}_COMPONENT})
+  if (COMP)
+    set (PKGCOMP ${PKG}_${COMP})
+  else ()
+    set (PKGCOMP ${PKG})
+  endif ()
+  string (TOUPPER ${PKG} PKGUP)
+  string (TOUPPER ${PKGCOMP} PKGCOMPUP)
+
+  # Only continue if package not found already
+  if (NOT ${PKGCOMP}_FOUND)
+
+    # Handle QUIET and REQUIRED arguments
+    if (${${PKG}_FIND_QUIETLY})
+      set (${PKGCOMP}_FIND_QUIETLY TRUE)
+    endif ()
+    if (${${PKG}_FIND_REQUIRED})
+      set (${PKGCOMP}_FIND_REQUIRED TRUE)
+    endif ()
+
+    # Determine search order
+    set (SEARCH_DIRS)
+    if (${PKG}_HINTS)
+      list (APPEND SEARCH_DIRS ${${PKG}_HINTS})
+    endif ()
+    if (${PKGCOMP}_PATH)
+      list (APPEND SEARCH_DIRS ${${PKGCOMP}_PATH})
+    endif ()
+    if (${PKG}_PATH)
+      list (APPEND SEARCH_DIRS ${${PKG}_PATH})
+    endif ()
+    if (DEFINED ENV{${PKGCOMPUP}})
+      list (APPEND SEARCH_DIRS $ENV{${PKGCOMPUP}})
+    endif ()
+    if (DEFINED ENV{${PKGUP}})
+      list (APPEND SEARCH_DIRS $ENV{${PKGUP}})
+    endif ()
+    if (CMAKE_SYSTEM_PREFIX_PATH)
+      list (APPEND SEARCH_DIRS ${CMAKE_SYSTEM_PREFIX_PATH})
+    endif ()
+    if (${PKG}_PATHS)
+      list (APPEND SEARCH_DIRS ${${PKG}_PATHS})
+    endif ()
+
+    # Start the search for the include file and library file. Only override
+    # if the variable is not defined.
+    foreach (suffix PREFIX LIBRARY INCLUDE_DIR)
+      if (NOT DEFINED ${PKGCOMP}_${suffix})
+        set (${PKGCOMP}_${suffix} ${PKGCOMP}_${suffix}-NOTFOUND)
+      endif ()
+    endforeach ()
+
+    foreach (dir IN LISTS SEARCH_DIRS)
+
+      # Search for include file names in current directory
+      foreach (iname IN LISTS ${PKGCOMP}_INCLUDE_NAMES)
+        if (EXISTS ${dir}/${iname})
+          set (${PKGCOMP}_PREFIX ${dir})
+          set (${PKGCOMP}_INCLUDE_DIR ${dir})
+          break ()
+        endif ()
+        if (EXISTS ${dir}/include/${iname})
+          set (${PKGCOMP}_PREFIX ${dir})
+          set (${PKGCOMP}_INCLUDE_DIR ${dir}/include)
+          break ()
+        endif ()
+      endforeach ()
+
+      # Search for library file names in the found prefix only!
+      if (${PKGCOMP}_PREFIX)
+        find_library (${PKGCOMP}_LIBRARY
+          NAMES ${${PKGCOMP}_LIBRARY_NAMES}
+          PATHS ${${PKGCOMP}_PREFIX}
+          PATH_SUFFIXES lib
+          NO_DEFAULT_PATH)
+
+        # If found, check if library is static or dynamic
+        if (${PKGCOMP}_LIBRARY)
+          is_shared_library (${PKGCOMP}_IS_SHARED ${${PKGCOMP}_LIBRARY})
+
+          # If we want only shared libraries, and it isn't shared...
+          if (PREFER_SHARED AND NOT ${PKGCOMP}_IS_SHARED)
+            find_shared_library (${PKGCOMP}_SHARED_LIBRARY
+              NAMES ${${PKGCOMP}_LIBRARY_NAMES}
+              PATHS ${${PKGCOMP}_PREFIX}
+              PATH_SUFFIXES lib
+              NO_DEFAULT_PATH)
+            if (${PKGCOMP}_SHARED_LIBRARY)
+              set (${PKGCOMP}_LIBRARY ${${PKGCOMP}_SHARED_LIBRARY})
+              set (${PKGCOMP}_IS_SHARED TRUE)
+            endif ()
+
+          # If we want only static libraries, and it is shared...
+          elseif (PREFER_STATIC AND ${PKGCOMP}_IS_SHARED)
+            find_static_library (${PKGCOMP}_STATIC_LIBRARY
+              NAMES ${${PKGCOMP}_LIBRARY_NAMES}
+              PATHS ${${PKGCOMP}_PREFIX}
+              PATH_SUFFIXES lib
+              NO_DEFAULT_PATH)
+            if (${PKGCOMP}_STATIC_LIBRARY)
+              set (${PKGCOMP}_LIBRARY ${${PKGCOMP}_STATIC_LIBRARY})
+              set (${PKGCOMP}_IS_SHARED FALSE)
+            endif ()
+          endif ()
+        endif ()
+
+        # If include dir and library both found, then we're done
+        if (${PKGCOMP}_INCLUDE_DIR AND ${PKGCOMP}_LIBRARY)
+          break ()
+
+        # Otherwise, reset the search variables and continue
+        else ()
+          set (${PKGCOMP}_PREFIX ${PKGCOMP}_PREFIX-NOTFOUND)
+          set (${PKGCOMP}_INCLUDE_DIR ${PKGCOMP}_INCLUDE_DIR-NOTFOUND)
+          set (${PKGCOMP}_LIBRARY ${PKGCOMP}_LIBRARY-NOTFOUND)
+        endif ()
+      endif ()
+
+    endforeach ()
+
+    # Handle the QUIETLY and REQUIRED arguments and set
+    # ${PKGCOMP}_FOUND to TRUE if all listed variables are TRUE
+    find_package_handle_standard_args (${PKGCOMP} DEFAULT_MSG
+      ${PKGCOMP}_LIBRARY
+      ${PKGCOMP}_INCLUDE_DIR)
+    mark_as_advanced (${PKGCOMP}_INCLUDE_DIR ${PKGCOMP}_LIBRARY)
+
+    # HACK For bug in CMake v3.0:
+    set (${PKGCOMP}_FOUND ${${PKGCOMPUP}_FOUND})
+
+    # Set return variables
+    if (${PKGCOMP}_FOUND)
+      set (${PKGCOMP}_INCLUDE_DIRS ${${PKGCOMP}_INCLUDE_DIR})
+      set (${PKGCOMP}_LIBRARIES ${${PKGCOMP}_LIBRARY})
+    endif ()
+
+    # Set variables in parent scope
+    set (${PKGCOMP}_FOUND ${${PKGCOMP}_FOUND} PARENT_SCOPE)
+    set (${PKGCOMP}_INCLUDE_DIR ${${PKGCOMP}_INCLUDE_DIR} PARENT_SCOPE)
+    set (${PKGCOMP}_INCLUDE_DIRS ${${PKGCOMP}_INCLUDE_DIRS} PARENT_SCOPE)
+    set (${PKGCOMP}_LIBRARY ${${PKGCOMP}_LIBRARY} PARENT_SCOPE)
+    set (${PKGCOMP}_LIBRARIES ${${PKGCOMP}_LIBRARIES} PARENT_SCOPE)
+    set (${PKGCOMP}_IS_SHARED ${${PKGCOMP}_IS_SHARED} PARENT_SCOPE)
+
+  endif ()
+
+endfunction ()
+
+
+
diff --git a/ParallelIO/cmake/LibMPI.cmake b/ParallelIO/cmake/LibMPI.cmake
new file mode 100644
index 0000000000..48efe976a9
--- /dev/null
+++ b/ParallelIO/cmake/LibMPI.cmake
@@ -0,0 +1,124 @@
+# This is part of the PIO library.
+
+# This file contains CMake code related to MPI.
+
+# Jim Edwards
+include (CMakeParseArguments)
+
+# Find Valgrind to perform memory leak check
+if (PIO_VALGRIND_CHECK)
+  find_program (VALGRIND_COMMAND NAMES valgrind)
+  if (VALGRIND_COMMAND)
+    set (VALGRIND_COMMAND_OPTIONS --leak-check=full --show-reachable=yes)
+  else ()
+    message (WARNING "Valgrind not found: memory leak check could not be performed")
+    set (VALGRIND_COMMAND "")
+  endif ()
+endif ()
+
+#
+# - Functions for parallel testing with CTest
+#
+
+#==============================================================================
+# - Get the machine platform-specific name
+#
+# Syntax:  platform_name (RETURN_VARIABLE)
+#
+function (platform_name RETURN_VARIABLE)
+
+  # Determine platform name from site name...
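+  # (CMake's site_name() stores the host name of the machine in
+  # SITENAME; the prefixes matched below are the login-node host names
+  # of the supported HPC systems.)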
+  site_name (SITENAME)
+
+  # NCAR/NWSC Machines
+  if (SITENAME MATCHES "^laramie" OR
+      SITENAME MATCHES "^cheyenne" OR
+      SITENAME MATCHES "^chadmin")
+
+    set (${RETURN_VARIABLE} "nwscla" PARENT_SCOPE)
+
+  # ALCF/Argonne Machines
+  elseif (SITENAME MATCHES "^mira" OR
+          SITENAME MATCHES "^cetus" OR
+          SITENAME MATCHES "^vesta" OR
+          SITENAME MATCHES "^cooley")
+
+    set (${RETURN_VARIABLE} "alcf" PARENT_SCOPE)
+
+  # NERSC Machines
+  elseif (SITENAME MATCHES "^edison" OR
+          SITENAME MATCHES "^cori")
+
+    set (${RETURN_VARIABLE} "nersc" PARENT_SCOPE)
+
+  # NCSA Machine (Blue Waters)
+  elseif (SITENAME MATCHES "^h2ologin")
+
+    set (${RETURN_VARIABLE} "ncsa" PARENT_SCOPE)
+
+  # OLCF/Oak Ridge Machines
+  elseif (SITENAME MATCHES "^eos" OR
+          SITENAME MATCHES "^titan")
+
+    set (${RETURN_VARIABLE} "olcf" PARENT_SCOPE)
+
+  else ()
+
+    set (${RETURN_VARIABLE} "unknown" PARENT_SCOPE)
+
+  endif ()
+endfunction ()
+
+#==============================================================================
+# - Add a new parallel test
+#
+# Syntax:  add_mpi_test (<test_name>
+#                        EXECUTABLE <executable file>
+#                        ARGUMENTS <argument1> <argument2> ...
+#                        NUMPROCS <number of MPI processes>
+#                        TIMEOUT <timeout in seconds>)
+function (add_mpi_test TESTNAME)
+
+  # Parse the input arguments
+  set (options)
+  set (oneValueArgs NUMPROCS TIMEOUT EXECUTABLE)
+  set (multiValueArgs ARGUMENTS)
+  cmake_parse_arguments (${TESTNAME} "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+
+  # Store parsed arguments for convenience
+  set (exec_file ${${TESTNAME}_EXECUTABLE})
+  set (exec_args ${${TESTNAME}_ARGUMENTS})
+  set (num_procs ${${TESTNAME}_NUMPROCS})
+  set (timeout ${${TESTNAME}_TIMEOUT})
+
+  # Get the platform name
+  platform_name (PLATFORM)
+
+  get_property(WITH_MPIEXEC GLOBAL PROPERTY WITH_MPIEXEC)
+  if (WITH_MPIEXEC)
+    set(MPIEXEC "${WITH_MPIEXEC}")
+  endif ()
+
+  # Default ("unknown" platform) execution
+  if (PLATFORM STREQUAL "unknown")
+
+    # Run tests directly from the command line
+    set(EXE_CMD ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${num_procs}
+      ${MPIEXEC_PREFLAGS} ${VALGRIND_COMMAND} ${VALGRIND_COMMAND_OPTIONS} ${exec_file}
+      ${MPIEXEC_POSTFLAGS} ${exec_args})
+
+  else ()
+
+    # Run tests from the platform-specific executable
+    set (EXE_CMD ${CMAKE_SOURCE_DIR}/cmake/mpiexec.${PLATFORM}
+      ${num_procs} ${VALGRIND_COMMAND} ${VALGRIND_COMMAND_OPTIONS} ${exec_file} ${exec_args})
+
+  endif ()
+
+  # Add the test to CTest
+  add_test(NAME ${TESTNAME} COMMAND ${EXE_CMD})
+
+  # Adjust the test timeout
+  set_tests_properties(${TESTNAME} PROPERTIES TIMEOUT ${timeout})
+
+endfunction()
diff --git a/ParallelIO/cmake/Makefile.am b/ParallelIO/cmake/Makefile.am
new file mode 100644
index 0000000000..9b2ea30f96
--- /dev/null
+++ b/ParallelIO/cmake/Makefile.am
@@ -0,0 +1,13 @@
+## This is the automake file for the cmake directory of the PIO
+## libraries. This directory holds files needed for the CMake build,
+## but not the autotools build.
+
+# Ed Hartnett 8/19/19
+
+# CMake needs all these extra files to build.
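+# (EXTRA_DIST makes automake ship these files in the "make dist"
+# release tarball, even though the autotools build itself does not
+# compile them.)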
+EXTRA_DIST = FindGPTL.cmake FindHDF5.cmake FindLIBRT.cmake \
+FindLIBZ.cmake FindMPE.cmake FindMPISERIAL.cmake FindNetCDF.cmake \
+FindPAPI.cmake FindPnetCDF.cmake FindSZIP.cmake LibCheck.cmake \
+LibFind.cmake LibMPI.cmake Makefile.am mpiexec.alcf mpiexec.ncsa \
+mpiexec.nersc mpiexec.nwscla mpiexec.olcf TryHDF5_HAS_SZIP.c \
+TryNetCDF_DAP.c TryNetCDF_PARALLEL.c TryNetCDF_PNETCDF.c
diff --git a/ParallelIO/cmake/TryHDF5_HAS_SZIP.c b/ParallelIO/cmake/TryHDF5_HAS_SZIP.c
new file mode 100644
index 0000000000..c4013455c2
--- /dev/null
+++ b/ParallelIO/cmake/TryHDF5_HAS_SZIP.c
@@ -0,0 +1,13 @@
+/*
+ * HDF5 C Test for szip filter
+ */
+#include "H5pubconf.h"
+
+int main()
+{
+#if H5_HAVE_FILTER_SZIP==1
+    return 0;
+#else
+    XXX; /* deliberately fails to compile when SZIP support is absent */
+#endif
+}
diff --git a/ParallelIO/cmake/TryNetCDF_DAP.c b/ParallelIO/cmake/TryNetCDF_DAP.c
new file mode 100644
index 0000000000..4d2f77fd98
--- /dev/null
+++ b/ParallelIO/cmake/TryNetCDF_DAP.c
@@ -0,0 +1,13 @@
+/*
+ * NetCDF C Test for DAP Support
+ */
+#include "netcdf_meta.h"
+
+int main()
+{
+#if NC_HAS_DAP==1 || NC_HAS_DAP2==1 || NC_HAS_DAP4==1
+    return 0;
+#else
+    XXX; /* deliberately fails to compile when DAP support is absent */
+#endif
+}
diff --git a/ParallelIO/cmake/TryNetCDF_PARALLEL.c b/ParallelIO/cmake/TryNetCDF_PARALLEL.c
new file mode 100644
index 0000000000..7b041f6371
--- /dev/null
+++ b/ParallelIO/cmake/TryNetCDF_PARALLEL.c
@@ -0,0 +1,13 @@
+/*
+ * NetCDF C Test for parallel Support
+ */
+#include "netcdf_meta.h"
+
+int main()
+{
+#if NC_HAS_PARALLEL==1
+    return 0;
+#else
+    XXX; /* deliberately fails to compile when parallel support is absent */
+#endif
+}
diff --git a/ParallelIO/cmake/TryNetCDF_PNETCDF.c b/ParallelIO/cmake/TryNetCDF_PNETCDF.c
new file mode 100644
index 0000000000..60a0c08864
--- /dev/null
+++ b/ParallelIO/cmake/TryNetCDF_PNETCDF.c
@@ -0,0 +1,13 @@
+/*
+ * NetCDF C Test for PnetCDF Support
+ */
+#include "netcdf_meta.h"
+
+int main()
+{
+#if NC_HAS_PNETCDF==1
+    return 0;
+#else
+    XXX; /* deliberately fails to compile when PnetCDF support is absent */
+#endif
+}
diff --git a/ParallelIO/cmake/mpiexec.alcf b/ParallelIO/cmake/mpiexec.alcf
new file mode 100755
index 0000000000..48765fd022
--- /dev/null
+++ b/ParallelIO/cmake/mpiexec.alcf
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Arguments:
+#
+# $1 - Number of MPI Tasks
+# $2+ - Executable and its arguments
+#
+
+NP=$1
+shift
+
+${BGQ_RUNJOB:-runjob} --np $NP --block $COBALT_PARTNAME \
+    --envs GPFSMPIO_NAGG_PSET=16 GPFSMPIO_ONESIDED_ALWAYS_RMW=1 \
+    GPFSMPIO_BALANCECONTIG=1 GPFSMPIO_WRITE_AGGMETHOD=2 \
+    GPFSMPIO_READ_AGGMETHOD=2 PAMID_TYPED_ONESIDED=1 \
+    PAMID_RMA_PENDING=1M GPFSMPIO_BRIDGERINGAGG=1 : $@
diff --git a/ParallelIO/cmake/mpiexec.ncsa b/ParallelIO/cmake/mpiexec.ncsa
new file mode 100755
index 0000000000..2bb0d1c846
--- /dev/null
+++ b/ParallelIO/cmake/mpiexec.ncsa
@@ -0,0 +1,12 @@
+#!/bin/bash
+#
+# Arguments:
+#
+# $1 - Number of MPI Tasks
+# $2+ - Executable and its arguments
+#
+
+NP=$1
+shift
+
+aprun -n $NP $@
diff --git a/ParallelIO/cmake/mpiexec.nersc b/ParallelIO/cmake/mpiexec.nersc
new file mode 100755
index 0000000000..e8774b0e98
--- /dev/null
+++ b/ParallelIO/cmake/mpiexec.nersc
@@ -0,0 +1,12 @@
+#!/bin/bash
+#
+# Arguments:
+#
+# $1 - Number of MPI Tasks
+# $2+ - Executable and its arguments
+#
+
+NP=$1
+shift
+
+srun -n $NP $@
diff --git a/ParallelIO/cmake/mpiexec.nwscla b/ParallelIO/cmake/mpiexec.nwscla
new file mode 100755
index 0000000000..9aea7be13e
--- /dev/null
+++ b/ParallelIO/cmake/mpiexec.nwscla
@@ -0,0 +1,11 @@
+#!/bin/bash
+#
+# Arguments:
+#
+# $1 - Number of MPI Tasks
+# $2+ - Executable and its arguments
+#
+
+NP=$1
+shift
+mpirun -np $NP $@
diff --git a/ParallelIO/cmake/mpiexec.olcf b/ParallelIO/cmake/mpiexec.olcf
new file mode 100755
index
0000000000..2bb0d1c846
--- /dev/null
+++ b/ParallelIO/cmake/mpiexec.olcf
@@ -0,0 +1,12 @@
+#!/bin/bash
+#
+# Arguments:
+#
+# $1 - Number of MPI Tasks
+# $2+ - Executable and its arguments
+#
+
+NP=$1
+shift
+
+aprun -n $NP $@
diff --git a/ParallelIO/cmake_config.h.in b/ParallelIO/cmake_config.h.in
new file mode 100644
index 0000000000..586030c202
--- /dev/null
+++ b/ParallelIO/cmake_config.h.in
@@ -0,0 +1,48 @@
+/** @file
+ *
+ * This is the template for the config.h file, which is created at
+ * build-time by cmake.
+ */
+#ifndef _PIO_CONFIG_
+#define _PIO_CONFIG_
+
+/** The major part of the version number. */
+#define PIO_VERSION_MAJOR @VERSION_MAJOR@
+
+/** The minor part of the version number. */
+#define PIO_VERSION_MINOR @VERSION_MINOR@
+
+/** The patch part of the version number. */
+#define PIO_VERSION_PATCH @VERSION_PATCH@
+
+/** Set to non-zero to turn on logging. Output may be large. */
+#define PIO_ENABLE_LOGGING @ENABLE_LOGGING@
+
+/** Size of MPI_Offset type. */
+#define SIZEOF_MPI_OFFSET @SIZEOF_MPI_OFFSET@
+
+/* Buffer size for darray data. */
+#define PIO_BUFFER_SIZE @PIO_BUFFER_SIZE@
+
+#define USE_VARD @USE_VARD@
+
+/* Does netCDF support netCDF/HDF5 files? */
+#cmakedefine HAVE_NETCDF4
+
+/* Does netCDF support parallel I/O for netCDF/HDF5 files? */
+#cmakedefine HAVE_NETCDF_PAR
+
+/* Does PIO support netCDF/HDF5 files? (Will be same as
+ * HAVE_NETCDF_PAR). */
+#cmakedefine _NETCDF4
+
+/* Do netCDF and HDF5 support parallel I/O filters? */
+#cmakedefine HAVE_PAR_FILTERS
+
+/* Was PIO built with netCDF integration? */
+#cmakedefine NETCDF_INTEGRATION
+
+/* Does PIO support using pnetcdf for I/O? */
+#cmakedefine _PNETCDF
+
+#endif /* _PIO_CONFIG_ */
diff --git a/ParallelIO/configure.ac b/ParallelIO/configure.ac
new file mode 100644
index 0000000000..007b1fdedc
--- /dev/null
+++ b/ParallelIO/configure.ac
@@ -0,0 +1,502 @@
+## This is the autoconf file for the PIO library.
+## Ed Hartnett 8/16/17
+
+# Initialize autoconf and automake.
+AC_INIT(pio, 2.5.10)
+AC_CONFIG_SRCDIR(src/clib/pio_darray.c)
+AM_INIT_AUTOMAKE([foreign serial-tests])
+
+
+# The PIO version, again. Use AC_SUBST for pio_meta.h and
+# AC_DEFINE_UNQUOTED for config.h.
+AC_SUBST([PIO_VERSION_MAJOR]) PIO_VERSION_MAJOR=2
+AC_SUBST([PIO_VERSION_MINOR]) PIO_VERSION_MINOR=5
+AC_SUBST([PIO_VERSION_PATCH]) PIO_VERSION_PATCH=10
+
+AC_DEFINE_UNQUOTED([PIO_VERSION_MAJOR], [$PIO_VERSION_MAJOR], [PIO major version])
+AC_DEFINE_UNQUOTED([PIO_VERSION_MINOR], [$PIO_VERSION_MINOR], [PIO minor version])
+AC_DEFINE_UNQUOTED([PIO_VERSION_PATCH], [$PIO_VERSION_PATCH], [PIO patch version])
+
+# Once more for the documentation.
+AC_SUBST([VERSION_MAJOR], [2])
+AC_SUBST([VERSION_MINOR], [5])
+AC_SUBST([VERSION_PATCH], [10])
+
+# The m4 directory holds macros for autoconf.
+AC_CONFIG_MACRO_DIR([m4])
+
+# Configuration date. This follows the convention of allowing
+# SOURCE_DATE_EPOCH to be used to specify a timestamp, to allow
+# byte-for-byte reproducible software builds.
+if test "x$SOURCE_DATE_EPOCH" != "x" ; then
+    AC_SUBST([CONFIG_DATE]) CONFIG_DATE="`date -u -d "${SOURCE_DATE_EPOCH}"`"
+else
+    AC_SUBST([CONFIG_DATE]) CONFIG_DATE="`date`"
+fi
+
+# Libtool initialisation.
+LD=ld # Required for MPE to work.
+LT_INIT
+
+# Find and learn about install and ranlib.
+AC_PROG_INSTALL
+AC_PROG_RANLIB
+
+# Find and learn about the C compiler.
+AC_PROG_CC
+AC_PROG_CC_C99
+
+# Compiler with version information. This consists of the full path
+# name of the compiler and the reported version number.
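+# (A hypothetical example: with CC=mpicc and an mpicc found at
+# /usr/bin/mpicc, CC_VERSION ends up as "/usr/bin/mpicc".)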
+AC_SUBST([CC_VERSION])
+# Strip anything that looks like a flag off of $CC
+CC_NOFLAGS=`echo $CC | sed 's/ -.*//'`
+
+if `echo $CC_NOFLAGS | grep ^/ >/dev/null 2>&1`; then
+   CC_VERSION="$CC"
+else
+   CC_VERSION="$CC";
+   for x in `echo $PATH | sed -e 's/:/ /g'`; do
+      if test -x $x/$CC_NOFLAGS; then
+         CC_VERSION="$x/$CC"
+         break
+      fi
+   done
+fi
+
+if test -n "$cc_version_info"; then
+   CC_VERSION="$CC_VERSION ( $cc_version_info)"
+fi
+
+# Find and learn about the Fortran compiler.
+AC_PROG_FC
+
+# Compiler with version information. This consists of the full path
+# name of the compiler and the reported version number.
+AC_SUBST([FC_VERSION])
+# Strip anything that looks like a flag off of $FC
+FC_NOFLAGS=`echo $FC | sed 's/ -.*//'`
+
+if `echo $FC_NOFLAGS | grep ^/ >/dev/null 2>&1`; then
+   FC_VERSION="$FC"
+else
+   FC_VERSION="$FC";
+   for x in `echo $PATH | sed -e 's/:/ /g'`; do
+      if test -x $x/$FC_NOFLAGS; then
+         FC_VERSION="$x/$FC"
+         break
+      fi
+   done
+fi
+if test -n "$fc_version_info"; then
+   FC_VERSION="$FC_VERSION ( $fc_version_info)"
+fi
+
+AC_MSG_CHECKING([whether a PIO_BUFFER_SIZE was specified])
+AC_ARG_WITH([piobuffersize],
+            [AS_HELP_STRING([--with-piobuffersize=<size>],
+                            [Specify buffer size for PIO.])],
+            [PIO_BUFFER_SIZE=$with_piobuffersize], [PIO_BUFFER_SIZE=134217728])
+AC_MSG_RESULT([$PIO_BUFFER_SIZE])
+AC_DEFINE_UNQUOTED([PIO_BUFFER_SIZE], [$PIO_BUFFER_SIZE], [buffer size for darray data.])
+
+# Does the user want to enable logging?
+AC_MSG_CHECKING([whether debug logging is enabled])
+AC_ARG_ENABLE([logging],
+              [AS_HELP_STRING([--enable-logging],
+                              [enable debug logging capability (will negatively impact performance). \
+                              This debugging feature is probably only of interest to PIO developers.])])
+test "x$enable_logging" = xyes || enable_logging=no
+AC_MSG_RESULT([$enable_logging])
+if test "x$enable_logging" = xyes; then
+   AC_DEFINE([PIO_ENABLE_LOGGING], 1, [If true, turn on logging.])
+fi
+
+# Does the user want to enable timing?
+AC_MSG_CHECKING([whether GPTL timing library is used])
+AC_ARG_ENABLE([timing],
+              [AS_HELP_STRING([--enable-timing],
+                              [enable use of the GPTL timing library.])])
+test "x$enable_timing" = xyes || enable_timing=no
+AC_MSG_RESULT([$enable_timing])
+if test "x$enable_timing" = xyes; then
+   AC_DEFINE([TIMING], 1, [If true, use GPTL timing library.])
+   AC_DEFINE([HAVE_MPI], [1], [required by GPTL timing library])
+fi
+AM_CONDITIONAL(USE_GPTL, [test "x$enable_timing" = xyes])
+
+# Does the user want to disable papi?
+AC_MSG_CHECKING([whether PAPI should be enabled (if enable-timing is used)])
+AC_ARG_ENABLE([papi], [AS_HELP_STRING([--disable-papi],
+              [disable PAPI library use])])
+test "x$enable_papi" = xno || enable_papi=yes
+AC_MSG_RESULT($enable_papi)
+
+# Does the user want to disable test runs?
+AC_MSG_CHECKING([whether test runs should be enabled for make check])
+AC_ARG_ENABLE([test-runs], [AS_HELP_STRING([--disable-test-runs],
+              [disable running run_test.sh test scripts for make check. Tests will still be built.])])
+test "x$enable_test_runs" = xno || enable_test_runs=yes
+AC_MSG_RESULT($enable_test_runs)
+AM_CONDITIONAL(RUN_TESTS, [test "x$enable_test_runs" = xyes])
+
+# Does the user want to enable the Fortran library?
+AC_MSG_CHECKING([whether Fortran library should be built])
+AC_ARG_ENABLE([fortran],
+              [AS_HELP_STRING([--enable-fortran],
+                              [build the PIO Fortran library.])])
+test "x$enable_fortran" = xyes || enable_fortran=no
+AC_MSG_RESULT([$enable_fortran])
+AM_CONDITIONAL(BUILD_FORTRAN, [test "x$enable_fortran" = xyes])
+
+# Does the user want to use the MPE library?
+AC_MSG_CHECKING([whether use of MPE library is enabled])
+AC_ARG_ENABLE([mpe],
+              [AS_HELP_STRING([--enable-mpe],
+                              [enable use of MPE library for timing and diagnostic info (may negatively impact performance).])])
+test "x$enable_mpe" = xyes || enable_mpe=no
+AC_MSG_RESULT([$enable_mpe])
+if test "x$enable_mpe" = xyes; then
+
+   AC_SEARCH_LIBS([pthread_setspecific], [pthread], [], [], [])
+   AC_SEARCH_LIBS([MPE_Log_get_event_number], [mpe], [HAVE_LIBMPE=yes], [HAVE_LIBMPE=no], [])
+   AC_SEARCH_LIBS([MPE_Init_mpi_core], [lmpe], [HAVE_LIBLMPE=yes], [HAVE_LIBLMPE=no], [])
+   AC_CHECK_HEADERS([mpe.h], [HAVE_MPE=yes], [HAVE_MPE=no])
+   if test "x$HAVE_LIBMPE" != xyes; then
+      AC_MSG_ERROR([-lmpe not found but --enable-mpe used.])
+   fi
+   if test "x$HAVE_LIBLMPE" != xyes; then
+      AC_MSG_ERROR([-llmpe not found but --enable-mpe used.])
+   fi
+   if test $enable_fortran = yes; then
+      AC_MSG_ERROR([MPE not implemented in Fortran tests and examples. Build without --enable-fortran])
+   fi
+   AC_DEFINE([USE_MPE], 1, [If true, use MPE timing library.])
+
+fi
+
+# Does the user want to disable pnetcdf?
+AC_MSG_CHECKING([whether pnetcdf is to be used])
+AC_ARG_ENABLE([pnetcdf],
+              [AS_HELP_STRING([--disable-pnetcdf],
+                              [Disable pnetcdf use.])])
+test "x$enable_pnetcdf" = xno || enable_pnetcdf=yes
+AC_MSG_RESULT([$enable_pnetcdf])
+AM_CONDITIONAL(BUILD_PNETCDF, [test "x$enable_pnetcdf" = xyes])
+
+# Does the user want to build documentation?
+AC_MSG_CHECKING([whether documentation should be built (requires doxygen)])
+AC_ARG_ENABLE([docs],
+              [AS_HELP_STRING([--enable-docs],
+                              [enable building of documentation with doxygen.])])
+test "x$enable_docs" = xyes || enable_docs=no
+AC_MSG_RESULT([$enable_docs])
+
+# Does the user want to build developer documentation?
+AC_MSG_CHECKING([whether PIO developer documentation should be built (only for PIO developers)])
+AC_ARG_ENABLE([developer-docs],
+              [AS_HELP_STRING([--enable-developer-docs],
+                              [enable building of PIO developer documentation with doxygen.])])
+test "x$enable_developer_docs" = xyes || enable_developer_docs=no
+AC_MSG_RESULT([$enable_developer_docs])
+
+# Developer docs enables docs.
+if test "x$enable_developer_docs" = xyes; then
+   enable_docs=yes
+fi
+AM_CONDITIONAL(BUILD_DOCS, [test "x$enable_docs" = xyes])
+
+# Did the user specify an MPI launcher other than mpiexec?
+AC_MSG_CHECKING([whether the user specified a program to launch MPI tests])
+AC_ARG_WITH([mpiexec],
+            [AS_HELP_STRING([--with-mpiexec=<command>],
+                            [Specify command to launch MPI parallel tests.])],
+            [WITH_MPIEXEC=$with_mpiexec], [WITH_MPIEXEC=mpiexec])
+AC_MSG_RESULT([$WITH_MPIEXEC])
+AC_SUBST([WITH_MPIEXEC], [$WITH_MPIEXEC])
+
+# Is doxygen installed?
+AC_CHECK_PROGS([DOXYGEN], [doxygen])
+if test -z "$DOXYGEN" -a "x$enable_docs" = xyes; then
+   AC_MSG_ERROR([Doxygen not found but --enable-docs used.])
+fi
+
+AC_MSG_NOTICE([processing doxyfile])
+# If building docs, process Doxyfile.in into Doxyfile.
+if test "x$enable_docs" = xyes; then + AC_SUBST([CMAKE_CURRENT_SOURCE_DIR], ["."]) + AC_SUBST([CMAKE_BINARY_DIR], [".."]) + if test "x$enable_fortran" = xno; then + AC_MSG_ERROR([--enable-fortran is required for documentation builds.]) + fi + AC_SUBST([FORTRAN_SRC_FILES], ["../src/flib/piodarray.f90 ../src/flib/pio.F90 ../src/flib/pio_kinds.F90 ../src/flib/piolib_mod.f90 ../src/flib/pionfatt_mod_2.f90 ../src/flib/pio_nf.F90 ../src/flib/pionfget_mod_2.f90 ../src/flib/pionfput_mod.f90 ../src/flib/pio_support.F90 ../src/flib/pio_types.F90"]) + if test "x$enable_developer_docs" = xyes; then + AC_SUBST([C_SRC_FILES], ["../src/clib ../src/ncint"]) + else + AC_SUBST([C_SRC_FILES], ["../src/clib/pio_nc.c ../src/clib/pio_nc4.c ../src/clib/pio_darray.c ../src/clib/pio_get_nc.c ../src/clib/pio_put_nc.c ../src/clib/pioc_support.c ../src/clib/pioc.c ../src/clib/pio_file.c ../src/clib/pio.h ../src/clib/pio_get_vard.c ../src/clib/pio_put_vard.c ../src/ncint/ncint_pio.c ../src/ncint/nc_put_vard.c ../src/ncint/nc_get_vard.c"]) + fi + AC_CONFIG_FILES([doc/Doxyfile]) +fi + +AC_MSG_NOTICE([finding libraries]) + +# Ensure we have MPI. +AC_CHECK_FUNCS([MPI_Init]) +if test "x$ac_cv_func_MPI_Init" != "xyes"; then + AC_MSG_ERROR([Can't link to MPI library. MPI is required.]) +fi + +# Check for netCDF library. +AC_CHECK_LIB([netcdf], [nc_create], [], [AC_MSG_ERROR([Can't find or link to the netcdf library.])]) +AC_CHECK_HEADERS([netcdf.h netcdf_meta.h]) + +# Check for pnetcdf library. +AC_CHECK_LIB([pnetcdf], [ncmpi_create], [], []) +if test "x$ac_cv_lib_pnetcdf_ncmpi_create" = xno -a $enable_pnetcdf = yes; then + AC_MSG_ERROR([Pnetcdf not found. Set CPPFLAGS/LDFLAGS or use --disable-pnetcdf.]) +fi + +# If we have parallel-netcdf, then set these as well. +if test x$ac_cv_lib_pnetcdf_ncmpi_create = xyes; then + AC_DEFINE([_PNETCDF], [1], [parallel-netcdf library available]) +fi + +# Do we have netCDF-4? +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([#include "netcdf_meta.h"], +[[#if !NC_HAS_NC4 +# error +#endif] +])], [have_netcdf4=yes], [have_netcdf4=no]) +AC_MSG_CHECKING([whether netCDF provides netCDF/HDF5]) +AC_MSG_RESULT([${have_netcdf4}]) + +# Do we have a parallel build of netCDF-4? (Really we should be +# checking NC_HAS_PARALLEL4, but that was only recently introduced, so +# we will go with NC_HAS_PARALLEL.) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([#include "netcdf_meta.h"], +[[#if !NC_HAS_PARALLEL +# error +#endif] +])], [have_netcdf_par=yes], [have_netcdf_par=no]) +AC_MSG_CHECKING([whether netCDF provides parallel I/O for netCDF/HDF5]) +AC_MSG_RESULT([${have_netcdf_par}]) + +# Do we have szip? +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([#include "netcdf_meta.h"], +[[#if !NC_HAS_SZIP_WRITE +# error +#endif] +])], [have_szip_write=yes], [have_szip_write=no]) +AC_MSG_CHECKING([whether netCDF provides szip write capability]) +AC_MSG_RESULT([${have_szip_write}]) + +# Do we have parallel filter support? Parallel filters are required +# for iotype NETCDF4P to use compression. +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([#include "netcdf_meta.h"], +[[#if !NC_HAS_PAR_FILTERS +# error +#endif] +])], [have_par_filters=yes], [have_par_filters=no]) +AC_MSG_CHECKING([whether netCDF provides parallel filter support]) +AC_MSG_RESULT([${have_par_filters}]) +if test "x$have_par_filters" = xyes ; then + AC_DEFINE([HAVE_PAR_FILTERS], [1], [if true, netcdf-c supports filters with parallel I/O]) +fi + +# Is this version 4.7.2, which does not work? 
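Each of these capability probes is a plain compile test against the NC_HAS_* macros in netcdf_meta.h, and the version checks that follow reuse the same pattern. The equivalent test can be reproduced by hand, e.g. (a sketch; assumes mpicc and netcdf_meta.h on the include path):

    # write a one-line probe and compile it; success means the feature is present
    printf '#include "netcdf_meta.h"\n#if !NC_HAS_PARALLEL\n# error\n#endif\nint main(void){return 0;}\n' > conftest.c
    mpicc -c conftest.c && echo "netCDF provides parallel I/O"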
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([#include "netcdf_meta.h"],
+[[#if NC_VERSION_MAJOR == 4 && NC_VERSION_MINOR == 7 && NC_VERSION_PATCH == 2
+#else
+# error
+#endif]
+])], [have_472=yes], [have_472=no])
+AC_MSG_CHECKING([whether this is netcdf-c-4.7.2])
+AC_MSG_RESULT([${have_472}])
+if test "x$have_472" = xyes; then
+   AC_MSG_ERROR([PIO cannot build with netcdf-c-4.7.2, please upgrade your netCDF version.])
+fi
+
+# Do we have the correct dispatch table version in netcdf-c for netcdf
+# integration?
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([#include "netcdf_meta.h"],
+[[#if NC_DISPATCH_VERSION != 2
+# error
+#endif]
+])], [have_dispatch2=yes], [have_dispatch2=no])
+AC_MSG_CHECKING([whether netcdf-c supports version 2 of dispatch table for netcdf integration])
+AC_MSG_RESULT([${have_dispatch2}])
+if test "x$enable_netcdf_integration" = xyes -a "x$have_dispatch2" = xno; then
+   AC_MSG_ERROR([NetCDF integration cannot be used with this version of netcdf-c, please upgrade your netCDF version.])
+fi
+
+# Set some build settings for when netcdf-4 is supported.
+if test x$have_netcdf_par = xyes; then
+   AC_DEFINE([_NETCDF4],[1],[Does netCDF library provide netCDF-4 with parallel access])
+fi
+AM_CONDITIONAL(BUILD_NETCDF4, [test "x$have_netcdf_par" = xyes])
+
+# Not working for some reason, so I will just set it...
+#AC_CHECK_TYPE([MPI_Offset], [], [], [#include <mpi.h>])
+#if test "x${ac_cv_type_MPI_Offset}" = xyes; then
+#   AC_CHECK_SIZEOF([MPI_Offset], [], [#include <mpi.h>])
+#else
+#   AC_MSG_ERROR([Unable to find type MPI_Offset in mpi.h])
+#fi
+
+# If we want the timing library, we must find it.
+if test "x$enable_timing" = xyes; then
+   AC_CHECK_HEADERS([gptl.h])
+   AC_CHECK_LIB([gptl], [GPTLinitialize], [],
+                [AC_MSG_ERROR([Can't find or link to the GPTL library.])])
+   if test "x$enable_fortran" = xyes; then
+      AC_LANG_PUSH([Fortran])
+#      AC_CHECK_HEADERS([gptl.inc])
+      AC_CHECK_LIB([gptlf], [gptlstart], [],
+                   [AC_MSG_ERROR([Can't find or link to the GPTL Fortran library.])])
+      AC_LANG_POP([Fortran])
+   fi
+
+   # Check for papi library.
+   AC_CHECK_LIB([papi], [PAPI_library_init])
+   AC_MSG_CHECKING([whether system can support PAPI])
+   have_papi=no
+   if test $enable_papi = yes; then
+      if test "x$ac_cv_lib_papi_PAPI_library_init" = xyes; then
+         # If we have PAPI library, check /proc/sys/kernel/perf_event_paranoid
+         # to see if we have permissions.
+         if test -f /proc/sys/kernel/perf_event_paranoid; then
+            if test `cat /proc/sys/kernel/perf_event_paranoid` != 1; then
+               AC_MSG_ERROR([PAPI library found, but /proc/sys/kernel/perf_event_paranoid != 1
+try sudo sh -c 'echo 1 >/proc/sys/kernel/perf_event_paranoid'])
+            fi
+         fi
+         AC_DEFINE([HAVE_PAPI], [1], [PAPI library is present and usable])
+         have_papi=yes
+      fi
+   fi
+   AC_MSG_RESULT($have_papi)
+fi
+AM_CONDITIONAL([HAVE_PAPI], [test "x$have_papi" = xyes])
+
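The PAPI permission probe above only inspects the kernel's perf_event_paranoid setting; the same check, and the remedy that the configure error message itself suggests, can be run by hand:

    # inspect the current setting (configure requires the value 1)
    cat /proc/sys/kernel/perf_event_paranoid
    # relax it, as suggested by the error message above
    sudo sh -c 'echo 1 >/proc/sys/kernel/perf_event_paranoid'

+# Does the user want to build netcdf-c integration layer?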
+AC_MSG_CHECKING([whether netcdf-c integration layer should be built])
+AC_ARG_ENABLE([netcdf-integration],
+              [AS_HELP_STRING([--enable-netcdf-integration],
+                              [enable building of netCDF C API integration.])])
+test "x$enable_netcdf_integration" = xyes || enable_netcdf_integration=no
+AC_MSG_RESULT([$enable_netcdf_integration])
+if test "x$enable_netcdf_integration" = xyes -a "x$enable_timing" = xyes; then
+   AC_MSG_ERROR([Cannot use GPTL timing library with netCDF integration.])
+fi
+if test "x$enable_netcdf_integration" = xyes -a "x$have_netcdf_par" = xno; then
+   AC_MSG_ERROR([Cannot use netCDF integration unless netCDF library was built for parallel I/O.])
+fi
+
+# If netCDF integration is used, set this preprocessor symbol.
+if test "x$enable_netcdf_integration" = xyes; then
+   AC_DEFINE([NETCDF_INTEGRATION],[1],[Are we building with netCDF integration])
+fi
+AM_CONDITIONAL(BUILD_NCINT, [test "x$enable_netcdf_integration" = xyes])
+
+# If we are building netCDF integration and also the PIO Fortran
+# library, then we also need netcdf-fortran.
+if test "x$enable_netcdf_integration" = xyes -a "x$enable_fortran" = xyes; then
+   AC_LANG_PUSH([Fortran])
+   AC_CHECK_LIB([netcdff], [nf_inq_libvers], [], [AC_MSG_ERROR([Can't find or link to the netcdf-fortran library, required because both --enable-fortran and --enable-netcdf-integration are specified.])])
+   AC_LANG_POP([Fortran])
+fi
+
+AC_CONFIG_FILES([tests/general/pio_tutil.F90:tests/general/util/pio_tutil.F90])
+
+# The user may have changed the MPIEXEC for these test scripts.
+AC_CONFIG_FILES([tests/cunit/run_tests.sh], [chmod ugo+x tests/cunit/run_tests.sh])
+AC_CONFIG_FILES([tests/ncint/run_tests.sh], [chmod ugo+x tests/ncint/run_tests.sh])
+AC_CONFIG_FILES([tests/ncint/run_perf.sh], [chmod ugo+x tests/ncint/run_perf.sh])
+AC_CONFIG_FILES([tests/fncint/run_tests.sh], [chmod ugo+x tests/fncint/run_tests.sh])
+AC_CONFIG_FILES([tests/general/run_tests.sh], [chmod ugo+x tests/general/run_tests.sh])
+AC_CONFIG_FILES([tests/performance/run_tests.sh], [chmod ugo+x tests/performance/run_tests.sh])
+AC_CONFIG_FILES([tests/unit/run_tests.sh], [chmod ugo+x tests/unit/run_tests.sh])
+AC_CONFIG_FILES([examples/c/run_tests.sh], [chmod ugo+x examples/c/run_tests.sh])
+AC_CONFIG_FILES([examples/f03/run_tests.sh], [chmod ugo+x examples/f03/run_tests.sh])
+
+# Args:
+# 1. netcdf_meta.h variable
+# 2. conditional variable that is yes or no.
+# 3. default condition
+#
+# example: AX_SET_META([NC_HAS_NC2],[$nc_build_v2],[]) # Because it checks for no.
+#          AX_SET_META([NC_HAS_HDF4],[$enable_hdf4],[yes])
+AC_DEFUN([AX_SET_META],[
+   if [ test "x$2" = x$3 ]; then
+      AC_SUBST([$1]) $1=1
+   else
+      AC_SUBST([$1]) $1=0
+   fi
+])
+
+#####
+# Define values used in include/pio_meta.h
+#####
+AX_SET_META([PIO_HAS_SZIP_WRITE],[$have_szip_write],[yes])
+AX_SET_META([PIO_HAS_PNETCDF],[$enable_pnetcdf],[yes])
+AX_SET_META([PIO_HAS_PAR_FILTERS], [$have_par_filters],[yes])
+AX_SET_META([PIO_HAS_NETCDF4], [$have_netcdf4],[yes])
+AX_SET_META([PIO_HAS_NETCDF4_PAR], [$have_netcdf_par],[yes])
+AX_SET_META([PIO_HAS_NETCDF_INTEGRATION], [$enable_netcdf_integration],[yes])
+
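AX_SET_META simply maps a yes/no shell variable onto a 1/0 substitution for the generated include/pio_meta.h header, by comparing the variable against the "true" condition passed as the third argument. A standalone plain-shell sketch of the same logic, for illustration only:

    have_netcdf4=yes
    # the macro's test, unrolled for one variable
    if test "x$have_netcdf4" = xyes; then PIO_HAS_NETCDF4=1; else PIO_HAS_NETCDF4=0; fi
    echo "PIO_HAS_NETCDF4=$PIO_HAS_NETCDF4"    # prints PIO_HAS_NETCDF4=1

+# Create output variables from various shell variables, for use in
+# generating libpio.settings.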
+AC_SUBST([enable_shared]) +AC_SUBST([enable_static]) +AC_SUBST([CFLAGS]) +AC_SUBST([CPPFLAGS]) +AC_SUBST([FFLAGS]) +AC_SUBST([FCFLAGS]) +AC_SUBST([LDFLAGS]) +AC_SUBST([FPPFLAGS]) # ignored by autotools +AC_SUBST(HAS_PNETCDF,[$enable_pnetcdf]) +AC_SUBST(HAS_LOGGING, [$enable_logging]) +AC_SUBST(HAS_SZIP_WRITE, [$have_szip_write]) +AC_SUBST([HAS_PAR_FILTERS], [$have_par_filters]) +AC_SUBST([HAS_NETCDF4], [$have_netcdf4]) +AC_SUBST([HAS_NETCDF4_PAR], [$have_netcdf_par]) +AC_SUBST([HAS_NETCDF_INTEGRATION], [$enable_netcdf_integration]) +AC_SUBST([HAS_PIO_FORTRAN], [$enable_fortran]) + +# Create the build summary file. +AC_CONFIG_FILES([libpio.settings + src/clib/pio_meta.h + ]) +AC_CONFIG_LINKS([tests/unit/input.nl:tests/unit/input.nl]) + +# Create the config.h file. +AC_CONFIG_HEADERS([config.h]) + +# Create the makefiles. +AC_OUTPUT(Makefile + src/Makefile + src/clib/Makefile + src/ncint/Makefile + src/flib/Makefile + src/gptl/Makefile + tests/Makefile + tests/cunit/Makefile + tests/ncint/Makefile + tests/fncint/Makefile + tests/unit/Makefile + tests/general/Makefile + tests/general/util/Makefile + tests/performance/Makefile + doc/Makefile + doc/source/Makefile + doc/images/Makefile + examples/Makefile + examples/c/Makefile + examples/f03/Makefile + cmake/Makefile + scripts/Makefile) + +# Show the build summary. +cat libpio.settings + + diff --git a/ParallelIO/ctest/CTestEnvironment-alcf.cmake b/ParallelIO/ctest/CTestEnvironment-alcf.cmake new file mode 100644 index 0000000000..607076479d --- /dev/null +++ b/ParallelIO/ctest/CTestEnvironment-alcf.cmake @@ -0,0 +1,14 @@ +#============================================================================== +# +# This file sets the environment variables needed to configure and build +# on the Argonne Leadership Computing Facility systems +# (mira/cetus/vesta/cooley). +# +#============================================================================== + +# Assume all package locations (NetCDF, PnetCDF, HDF5, etc) are already +# set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. + +# Define the extra CMake configure options +set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPREFER_STATIC=TRUE") diff --git a/ParallelIO/ctest/CTestEnvironment-anlworkstation.cmake b/ParallelIO/ctest/CTestEnvironment-anlworkstation.cmake new file mode 100644 index 0000000000..38e5b4b0a9 --- /dev/null +++ b/ParallelIO/ctest/CTestEnvironment-anlworkstation.cmake @@ -0,0 +1,26 @@ +#============================================================================== +# +# This file sets the environment variables needed to configure and build +# on Argonne Linux workstations +# +#============================================================================== + +# Assume all package locations (NetCDF, PnetCDF, HDF5, etc) are already +# set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. 
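Each of these CTestEnvironment-*.cmake files follows the same pattern: seed CTEST_CONFIGURE_OPTIONS with -DCMAKE_VERBOSE_MAKEFILE=TRUE, then append machine-specific -D settings taken from the environment. For the Argonne workstations, the *ROOT variables referenced below are expected to be exported before ctest runs; a sketch with illustrative paths:

    export NETCDFROOT=/path/to/netcdf
    export PNETCDFROOT=/path/to/pnetcdf
    export HDF5ROOT=/path/to/hdf5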
+ +# Define the extra CMake configure options +set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DNetCDF_PATH=$ENV{NETCDFROOT}") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPnetCDF_PATH=$ENV{PNETCDFROOT}") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DHDF5_PATH=$ENV{HDF5ROOT}") + +# If ENABLE_COVERAGE environment variable is set, then enable code coverage +if (DEFINED ENV{ENABLE_COVERAGE}) + set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPIO_ENABLE_COVERAGE=ON") +endif () + +# If VALGRIND_CHECK environment variable is set, then enable memory leak check using Valgrind +if (DEFINED ENV{VALGRIND_CHECK}) + set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPIO_VALGRIND_CHECK=ON") +endif () + diff --git a/ParallelIO/ctest/CTestEnvironment-cgd.cmake b/ParallelIO/ctest/CTestEnvironment-cgd.cmake new file mode 100644 index 0000000000..eb8606e46d --- /dev/null +++ b/ParallelIO/ctest/CTestEnvironment-cgd.cmake @@ -0,0 +1,17 @@ +#============================================================================== +# +# This file sets the environment variables needed to configure and build +# on the NCAR CGD cluster Hobart +# +#============================================================================== + +# Assume all package locations (NetCDF, PnetCDF, HDF5, etc) are already +# set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. + +# Define the extra CMake configure options +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DCMAKE_VERBOSE_MAKEFILE=TRUE -DPNETCDF_DIR=$ENV{PNETCDF_PATH} -DNETCDF_DIR=$ENV{NETCDF_PATH}") + +# If MPISERIAL environment variable is set, then enable MPISERIAL +if (DEFINED ENV{MPISERIAL}) + set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPIO_USE_MPISERIAL=ON") +endif () diff --git a/ParallelIO/ctest/CTestEnvironment-ncsa.cmake b/ParallelIO/ctest/CTestEnvironment-ncsa.cmake new file mode 100644 index 0000000000..706946ec2b --- /dev/null +++ b/ParallelIO/ctest/CTestEnvironment-ncsa.cmake @@ -0,0 +1,22 @@ +#============================================================================== +# +# This file sets the environment variables needed to configure and build +# on the NCSA systems +# (Blue Waters). +# +#============================================================================== + +# Assume all package locations (NetCDF, PnetCDF, HDF5, etc) are already +# set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. 
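Note that these optional toggles (ENABLE_COVERAGE and VALGRIND_CHECK here, MPISERIAL on the CGD cluster) key off whether the environment variable is defined at all, not on its value, so any value enables them. For example (illustrative):

    ENABLE_COVERAGE=1 VALGRIND_CHECK=1 ctest -S CTestScript.cmake,Experimental -VV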
+ +# Define the extra CMake configure options +set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPREFER_STATIC=TRUE") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DNetCDF_PATH=$ENV{NETCDF_DIR}") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPnetCDF_PATH=$ENV{PARALLEL_NETCDF_DIR}") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DHDF5_PATH=$ENV{HDF5_DIR}") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_C_INCLUDE_PATH=$ENV{MPICH_DIR}/include") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_Fortran_INCLUDE_PATH=$ENV{MPICH_DIR}/include") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_C_LIBRARIES=$ENV{MPICH_DIR}/lib/libmpich.a") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_Fortran_LIBRARIES=$ENV{MPICH_DIR}/lib/libmpichf90.a") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DCMAKE_SYSTEM_NAME=Catamount") diff --git a/ParallelIO/ctest/CTestEnvironment-nersc.cmake b/ParallelIO/ctest/CTestEnvironment-nersc.cmake new file mode 100644 index 0000000000..6b1ac8fa79 --- /dev/null +++ b/ParallelIO/ctest/CTestEnvironment-nersc.cmake @@ -0,0 +1,22 @@ +#============================================================================== +# +# This file sets the environment variables needed to configure and build +# on the NERSC systems +# (edison/ corip1). +# +#============================================================================== + +# Assume all package locations (NetCDF, PnetCDF, HDF5, etc) are already +# set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. + +# Define the extra CMake configure options +set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPREFER_STATIC=TRUE") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DNETCDF_DIR=$ENV{NETCDF_DIR}") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPNETCDF_DIR=$ENV{PARALLEL_NETCDF_DIR}") +#set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DHDF5_PATH=$ENV{HDF5_DIR}") +#set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_C_INCLUDE_PATH=$ENV{MPICH_DIR}/include") +#set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_Fortran_INCLUDE_PATH=$ENV{MPICH_DIR}/include") +#set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_C_LIBRARIES=$ENV{MPICH_DIR}/lib/libmpich.a") +#set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_Fortran_LIBRARIES=$ENV{MPICH_DIR}/lib/libmpichf90.a") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DCMAKE_SYSTEM_NAME=Catamount") diff --git a/ParallelIO/ctest/CTestEnvironment-nwscla.cmake b/ParallelIO/ctest/CTestEnvironment-nwscla.cmake new file mode 100644 index 0000000000..efee6bf659 --- /dev/null +++ b/ParallelIO/ctest/CTestEnvironment-nwscla.cmake @@ -0,0 +1,18 @@ +#============================================================================== +# +# This file sets the environment variables needed to configure and build +# on the new NCAR Wyoming Supercomputing Center systems +# (laramie/cheyenne). +# +#============================================================================== + +# Assume all package locations (NetCDF, PnetCDF, HDF5, etc) are already +# set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. 
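On these Cray systems the options pin static MPICH libraries explicitly and set CMAKE_SYSTEM_NAME=Catamount, which makes CMake treat the build as a cross-compile for the compute nodes. Expanded, the Blue Waters settings amount to a configure line of roughly this shape (abridged sketch; the trailing source path is a placeholder):

    cmake -DCMAKE_VERBOSE_MAKEFILE=TRUE -DPREFER_STATIC=TRUE \
          -DMPI_C_LIBRARIES=$MPICH_DIR/lib/libmpich.a \
          -DCMAKE_SYSTEM_NAME=Catamount /path/to/ParallelIO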
+ +# Define the extra CMake configure options +set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE ") + +# If MPISERIAL environment variable is set, then enable MPISERIAL +if (DEFINED ENV{MPISERIAL}) + set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPIO_USE_MPISERIAL=ON") +endif () diff --git a/ParallelIO/ctest/CTestEnvironment-unknown.cmake b/ParallelIO/ctest/CTestEnvironment-unknown.cmake new file mode 100644 index 0000000000..8d51f15fff --- /dev/null +++ b/ParallelIO/ctest/CTestEnvironment-unknown.cmake @@ -0,0 +1,12 @@ +#============================================================================== +# +# This file sets the CMake variables needed to configure and build +# on the default ("unknown") system. +# +#============================================================================== + +# Assume all package locations (NetCDF, PnetCDF, HDF5, etc) are already +# set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. + +# Define the extra CMake configure options +set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE") diff --git a/ParallelIO/ctest/CTestScript-Test.cmake b/ParallelIO/ctest/CTestScript-Test.cmake new file mode 100644 index 0000000000..cf50195a25 --- /dev/null +++ b/ParallelIO/ctest/CTestScript-Test.cmake @@ -0,0 +1,29 @@ +#============================================================================== +# +# This is the CTest script for generating test results for submission to the +# CTest Dashboard site: my.cdash.org. +# +# Example originally stolen from: +# http://www.vtk.org/Wiki/CTest:Using_CTEST_and_CDASH_without_CMAKE +#============================================================================== + +#------------------------------------------- +#-- Get the common build information +#------------------------------------------- + +set (CTEST_SITE $ENV{PIO_DASHBOARD_SITE}-$ENV{PIO_COMPILER_ID}) +set (CTEST_BUILD_NAME $ENV{PIO_DASHBOARD_BUILD_NAME}) +set (CTEST_SOURCE_DIRECTORY $ENV{PIO_DASHBOARD_SOURCE_DIR}) +set (CTEST_BINARY_DIRECTORY $ENV{PIO_DASHBOARD_BINARY_DIR}) + +# ----------------------------------------------------------- +# -- Run CTest- TESTING ONLY (Appended to existing TAG) +# ----------------------------------------------------------- + +## -- Start +ctest_start("${CTEST_SCRIPT_ARG}" APPEND) + +## -- TEST +ctest_test() + +## Don't submit! Submission handled by main CTestScript diff --git a/ParallelIO/ctest/runcdash-alcf-ibm.sh b/ParallelIO/ctest/runcdash-alcf-ibm.sh new file mode 100755 index 0000000000..9f36996bda --- /dev/null +++ b/ParallelIO/ctest/runcdash-alcf-ibm.sh @@ -0,0 +1,37 @@ +#!/bin/sh + +# Get/Generate the Dashboard Model +if [ $# -eq 0 ]; then + model=Experimental +else + model=$1 +fi + +# Manually set environment variables for CTest run/build +GIT=/soft/versioning/git/2.3.0/bin/git +CTEST=/soft/buildtools/cmake/3.3.0/bin/ctest + +export LIBZ=/soft/libraries/alcf/current/xl/ZLIB +export HDF5=/soft/libraries/hdf5/1.8.14/cnk-xl/V1R2M2-20150213 +export NETCDF=/soft/libraries/netcdf/4.3.3-f4.4.1/cnk-xl/V1R2M2-20150213 +export PNETCDF=/soft/libraries/pnetcdf/1.6.0/cnk-xl/V1R2M2-20150213 + +export CC=/soft/compilers/wrappers/xl/mpixlc_r +export FC=/soft/compilers/wrappers/xl/mpixlf90_r + +export PIO_DASHBOARD_ROOT=`pwd`/dashboard +export PIO_COMPILER_ID=Cray-`$CC -qversion | head -n 2 | tail -n 1 | cut -d' ' -f2` + +if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then + mkdir "$PIO_DASHBOARD_ROOT" +fi +cd "$PIO_DASHBOARD_ROOT" + +if [ ! 
-d src ]; then + $GIT clone https://github.com/PARALLELIO/ParallelIO src +fi +cd src +git checkout develop +git pull origin develop + +$CTEST -S CTestScript.cmake,${model} -VV diff --git a/ParallelIO/ctest/runcdash-anlworkstation.sh b/ParallelIO/ctest/runcdash-anlworkstation.sh new file mode 100755 index 0000000000..44651b2f3e --- /dev/null +++ b/ParallelIO/ctest/runcdash-anlworkstation.sh @@ -0,0 +1,45 @@ +#!/bin/sh + +# Get/Generate the Dashboard Model +if [ $# -eq 0 ]; then + model=Experimental +else + model=$1 +fi + +source /software/common/adm/packages/softenv-1.6.2/etc/softenv-load.sh +source /software/common/adm/packages/softenv-1.6.2/etc/softenv-aliases.sh + +soft add +gcc-6.2.0 +soft add +mpich-3.2-gcc-6.2.0 +soft add +cmake-3.5.1 + +export NETCDFROOT=/soft/apps/packages/climate/netcdf/4.4.1c-4.2cxx-4.4.4f-parallel/gcc-6.2.0 +export PNETCDFROOT=/soft/apps/packages/climate/pnetcdf/1.7.0/gcc-6.2.0 +export HDF5ROOT=/soft/apps/packages/climate/hdf5/1.8.16-parallel/gcc-6.2.0 + +export CC=mpicc +export FC=mpifort + +export PIO_DASHBOARD_SITE=anlworkstation-`hostname` +export PIO_DASHBOARD_ROOT=/sandbox/dashboard +export CTEST_SCRIPT_DIRECTORY=${PIO_DASHBOARD_ROOT}/src +export PIO_DASHBOARD_SOURCE_DIR=${CTEST_SCRIPT_DIRECTORY} +export PIO_COMPILER_ID=gcc-`gcc --version | head -n 1 | cut -d' ' -f3` + +if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then + mkdir "$PIO_DASHBOARD_ROOT" +fi +cd "$PIO_DASHBOARD_ROOT" + +echo "CTEST_SCRIPT_DIRECTORY="${CTEST_SCRIPT_DIRECTORY} +echo "PIO_DASHBOARD_SOURCE_DIR="${PIO_DASHBOARD_SOURCE_DIR} + +if [ ! -d src ]; then + git clone --branch develop https://github.com/PARALLELIO/ParallelIO src +fi +cd src +git checkout develop +git pull origin develop + +ctest -S CTestScript.cmake,${model} -VV diff --git a/ParallelIO/ctest/runcdash-cgd-gnu-openmpi.sh b/ParallelIO/ctest/runcdash-cgd-gnu-openmpi.sh new file mode 100755 index 0000000000..57cad94053 --- /dev/null +++ b/ParallelIO/ctest/runcdash-cgd-gnu-openmpi.sh @@ -0,0 +1,38 @@ +#!/bin/sh + +# Get/Generate the Dashboard Model +if [ $# -eq 0 ]; then + model=Experimental +else + model=$1 +fi + +module purge +module load compiler/gnu/5.4.0 +module load tool/parallel-netcdf/1.8.1/gnu-5.4.0/openmpi + +export CC=mpicc +export FC=mpif90 +export PIO_DASHBOARD_SITE="cgd" +export PIO_DASHBOARD_ROOT=/scratch/cluster/jedwards/dashboard +export CTEST_SCRIPT_DIRECTORY=${PIO_DASHBOARD_ROOT}/src +export PIO_DASHBOARD_SOURCE_DIR=${CTEST_SCRIPT_DIRECTORY} +export PIO_COMPILER_ID=gcc-`gcc --version | head -n 1 | cut -d' ' -f3` + +if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then + mkdir "$PIO_DASHBOARD_ROOT" +fi +cd "$PIO_DASHBOARD_ROOT" + +echo "CTEST_SCRIPT_DIRECTORY="${CTEST_SCRIPT_DIRECTORY} +echo "PIO_DASHBOARD_SOURCE_DIR="${PIO_DASHBOARD_SOURCE_DIR} + +if [ ! 
-d src ]; then + git clone --branch develop https://github.com/PARALLELIO/ParallelIO src +fi +cd src +git checkout develop +git pull origin develop + + +ctest -S CTestScript.cmake,${model} -VV -DCTEST_CONFIGURE_OPTIONS="-DCMAKE_EXE_LINKER_FLAGS=-ldl" diff --git a/ParallelIO/ctest/runcdash-cgd-nag.sh b/ParallelIO/ctest/runcdash-cgd-nag.sh new file mode 100755 index 0000000000..e413186131 --- /dev/null +++ b/ParallelIO/ctest/runcdash-cgd-nag.sh @@ -0,0 +1,38 @@ +#!/bin/sh + +# Get/Generate the Dashboard Model +if [ $# -eq 0 ]; then + model=Experimental +else + model=$1 +fi + +module purge +module load compiler/nag/6.1 +module load tool/parallel-netcdf/1.7.0/nag/mvapich2 + +export CC=mpicc +export FC=mpif90 +export PIO_DASHBOARD_SITE="cgd" +export PIO_DASHBOARD_ROOT=/scratch/cluster/jedwards/dashboard +export CTEST_SCRIPT_DIRECTORY=${PIO_DASHBOARD_ROOT}/src +export PIO_DASHBOARD_SOURCE_DIR=${CTEST_SCRIPT_DIRECTORY} +export PIO_COMPILER_ID=Nag-6.1-gcc-`gcc --version | head -n 1 | cut -d' ' -f3` + +if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then + mkdir "$PIO_DASHBOARD_ROOT" +fi +cd "$PIO_DASHBOARD_ROOT" + +echo "CTEST_SCRIPT_DIRECTORY="${CTEST_SCRIPT_DIRECTORY} +echo "PIO_DASHBOARD_SOURCE_DIR="${PIO_DASHBOARD_SOURCE_DIR} + +if [ ! -d src ]; then + git clone --branch develop https://github.com/PARALLELIO/ParallelIO src +fi +cd src +git checkout develop +git pull origin develop + + +ctest -S CTestScript.cmake,${model} -VV diff --git a/ParallelIO/ctest/runcdash-nersc-cray.sh b/ParallelIO/ctest/runcdash-nersc-cray.sh new file mode 100755 index 0000000000..d3516cea7d --- /dev/null +++ b/ParallelIO/ctest/runcdash-nersc-cray.sh @@ -0,0 +1,74 @@ +#!/bin/sh + +# Get/Generate the Dashboard Model +if [ $# -eq 0 ]; then + model=Experimental +else + model=$1 +fi + +module rm PrgEnv-intel +module rm PrgEnv-cray +module rm PrgEnv-gnu +module rm intel +module rm cce +module rm cray-parallel-netcdf +module rm cray-parallel-hdf5 +module rm pmi +module rm cray-libsci +module rm cray-mpich2 +module rm cray-mpich +module rm cray-netcdf +module rm cray-hdf5 +module rm cray-netcdf-hdf5parallel +module rm craype-sandybridge +module rm craype-ivybridge +module rm craype-haswell +module rm craype +module load PrgEnv-cray + +case "$NERSC_HOST" in + edison) + cd $CSCRATCH/dashboard + module switch cce cce/8.5.1 + module load craype-ivybridge + module load git/2.4.6 + module load cmake/3.3.2 + module load cray-hdf5-parallel/1.8.16 + module load cray-netcdf-hdf5parallel/4.3.3.1 + module load cray-parallel-netcdf/1.7.0 + ;; + cori) + cd $SCRATCH/dashboard + module switch cce cce/8.5.4 + module load craype-mic-knl + module load git/2.9.1 + module load cmake/3.3.2 + module load cray-hdf5-parallel/1.8.16 + module load cray-netcdf-hdf5parallel/4.3.3.1 + module load cray-parallel-netcdf/1.7.0 + ;; + +esac + +export CC=cc +export FC=ftn + +export PIO_DASHBOARD_ROOT=`pwd`/dashboard +export PIO_COMPILER_ID=Cray-`$CC -V 2>&1 | cut -d' ' -f5` + +if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then + mkdir "$PIO_DASHBOARD_ROOT" +fi +cd "$PIO_DASHBOARD_ROOT" + +if [ ! 
-d src ]; then + git clone --branch develop https://github.com/PARALLELIO/ParallelIO src +fi +cd src +git checkout develop +git pull origin develop + +export HDF5_DISABLE_VERSION_CHECK=2 + +ctest -S CTestScript.cmake,${model} -VV diff --git a/ParallelIO/ctest/runcdash-nersc-intel.sh b/ParallelIO/ctest/runcdash-nersc-intel.sh new file mode 100755 index 0000000000..55c80559b6 --- /dev/null +++ b/ParallelIO/ctest/runcdash-nersc-intel.sh @@ -0,0 +1,73 @@ +#!/bin/sh + +# Get/Generate the Dashboard Model +if [ $# -eq 0 ]; then + model=Experimental +else + model=$1 +fi + +module rm PrgEnv-intel +module rm PrgEnv-cray +module rm PrgEnv-gnu +module rm intel +module rm cce +module rm cray-parallel-netcdf +module rm cray-parallel-hdf5 +module rm pmi +module rm cray-libsci +module rm cray-mpich2 +module rm cray-mpich +module rm cray-netcdf +module rm cray-hdf5 +module rm cray-netcdf-hdf5parallel +module rm craype-sandybridge +module rm craype-ivybridge +module rm craype-haswell +module rm craype +module load PrgEnv-intel + +case "$NERSC_HOST" in + edison) + cd $CSCRATCH/dashboard + module switch intel intel/16.0.0.109 + module load craype-ivybridge + module load git/2.4.6 + module load cmake/3.3.2 + module load cray-hdf5-parallel/1.8.16 + module load cray-netcdf-hdf5parallel/4.3.3.1 + module load cray-parallel-netcdf/1.7.0 + ;; + cori) + cd $SCRATCH/dashboard + module switch intel intel/17.0.1.132 + module load craype-mic-knl + module load git/2.9.1 + module load cmake/3.3.2 + module load cray-hdf5-parallel/1.8.16 + module load cray-netcdf-hdf5parallel/4.3.3.1 + module load cray-parallel-netcdf/1.7.0 + ;; + +esac + +export CC=cc +export FC=ftn + +export PIO_DASHBOARD_ROOT=`pwd`/dashboard +export PIO_COMPILER_ID=Intel-`$CC --version | head -n 1 | cut -d' ' -f3` + +if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then + mkdir "$PIO_DASHBOARD_ROOT" +fi +cd "$PIO_DASHBOARD_ROOT" + +if [ ! -d src ]; then + git clone --branch develop https://github.com/PARALLELIO/ParallelIO src +fi +cd src +git checkout develop +git pull origin develop + +export HDF5_DISABLE_VERSION_CHECK=2 +ctest -S CTestScript.cmake,${model} -VV diff --git a/ParallelIO/ctest/runcdash-nwsc-intel-mpiserial.sh b/ParallelIO/ctest/runcdash-nwsc-intel-mpiserial.sh new file mode 100755 index 0000000000..68ac5826be --- /dev/null +++ b/ParallelIO/ctest/runcdash-nwsc-intel-mpiserial.sh @@ -0,0 +1,37 @@ +#!/bin/sh + +# Get/Generate the Dashboard Model +if [ $# -eq 0 ]; then + model=Experimental +else + model=$1 +fi + +module reset +module unload netcdf +module swap intel intel/15.0.3 +module load git/2.3.0 +module load cmake/3.0.2 +module load netcdf/4.3.3.1 + +export MPISERIAL=/glade/u/home/jedwards/mpi-serial/intel15.0.3/ + +export CC=icc +export FC=ifort + +export PIO_DASHBOARD_ROOT=`pwd`/dashboard +export PIO_COMPILER_ID=Serial-Intel-`$CC --version | head -n 1 | cut -d' ' -f3` + +if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then + mkdir "$PIO_DASHBOARD_ROOT" +fi +cd "$PIO_DASHBOARD_ROOT" + +if [ ! 
-d src ]; then + git clone --branch develop https://github.com/PARALLELIO/ParallelIO src +fi +cd src +git checkout develop +git pull origin develop + +ctest -S CTestScript.cmake,${model} -VV diff --git a/ParallelIO/ctest/runcdash-nwsc-intel.sh b/ParallelIO/ctest/runcdash-nwsc-intel.sh new file mode 100755 index 0000000000..1e72e9a1b3 --- /dev/null +++ b/ParallelIO/ctest/runcdash-nwsc-intel.sh @@ -0,0 +1,36 @@ +#!/bin/sh + +# Get/Generate the Dashboard Model +if [ $# -eq 0 ]; then + model=Experimental +else + model=$1 +fi + +module reset +module unload netcdf +module swap intel intel/16.0.3 +module load git/2.3.0 +module load cmake/3.0.2 +module load netcdf-mpi/4.4.1 +module load pnetcdf/1.7.0 + +export CC=mpicc +export FC=mpif90 + +export PIO_DASHBOARD_ROOT=`pwd`/dashboard +export PIO_COMPILER_ID=Intel-`$CC --version | head -n 1 | cut -d' ' -f3` + +if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then + mkdir "$PIO_DASHBOARD_ROOT" +fi +cd "$PIO_DASHBOARD_ROOT" + +if [ ! -d src ]; then + git clone --branch develop https://github.com/PARALLELIO/ParallelIO src +fi +cd src +git checkout develop +git pull origin develop + +ctest -S CTestScript.cmake,${model} -VV diff --git a/ParallelIO/ctest/runcdash-nwscla-gnu.sh b/ParallelIO/ctest/runcdash-nwscla-gnu.sh new file mode 100755 index 0000000000..ba6b60a5d8 --- /dev/null +++ b/ParallelIO/ctest/runcdash-nwscla-gnu.sh @@ -0,0 +1,37 @@ +#!/bin/sh + +# Get/Generate the Dashboard Model +if [ $# -eq 0 ]; then + model=Experimental +else + model=$1 +fi + +module reset +module unload netcdf +module swap intel gnu/10.1.0 +module swap mpt openmpi/4.0.3 +module load git +module load cmake +module load netcdf-mpi/4.7.3 +module load pnetcdf/1.12.1 + +export CC=mpicc +export FC=mpif90 + +export PIO_DASHBOARD_ROOT=/glade/u/home/jedwards/sandboxes/dashboard +export PIO_COMPILER_ID=GNU-`$CC --version | head -n 1 | tail -n 1 | cut -d' ' -f3` + +if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then + mkdir "$PIO_DASHBOARD_ROOT" +fi +cd "$PIO_DASHBOARD_ROOT" + +if [ ! -d src ]; then + git clone https://github.com/PARALLELIO/ParallelIO src +fi +cd src +git checkout master +git pull origin master + +ctest -S CTestScript.cmake,${model} -VV diff --git a/ParallelIO/ctest/runcdash-nwscla-intel.sh b/ParallelIO/ctest/runcdash-nwscla-intel.sh new file mode 100755 index 0000000000..e85d5760c0 --- /dev/null +++ b/ParallelIO/ctest/runcdash-nwscla-intel.sh @@ -0,0 +1,40 @@ +#!/bin/sh + +# Get/Generate the Dashboard Model +if [ $# -eq 0 ]; then + model=Experimental +else + model=$1 +fi + +source /etc/profile.d/modules.sh + +module reset +module unload netcdf +module swap intel intel/19.1.1 +module switch mpt mpt/2.22 +module load cmake/3.18.2 +module load netcdf-mpi/4.7.3 +module load pnetcdf/1.12.1 +echo "MODULE LIST..." +module list + +export CC=mpicc +export FC=mpif90 +export MPI_TYPE_DEPTH=24 +export PIO_DASHBOARD_ROOT=/glade/scratch/jedwards/dashboard +export PIO_COMPILER_ID=Intel-`$CC --version | head -n 1 | cut -d' ' -f3` + +if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then + mkdir "$PIO_DASHBOARD_ROOT" +fi +cd "$PIO_DASHBOARD_ROOT" + +if [ ! 
-d src ]; then + git clone https://github.com/PARALLELIO/ParallelIO src +fi +cd src +git checkout master +git pull origin master + +ctest -S CTestScript.cmake,${model} -VV diff --git a/ParallelIO/ctest/runcdash-nwscla-pgi.sh b/ParallelIO/ctest/runcdash-nwscla-pgi.sh new file mode 100755 index 0000000000..8d9c0cde8b --- /dev/null +++ b/ParallelIO/ctest/runcdash-nwscla-pgi.sh @@ -0,0 +1,37 @@ +#!/bin/sh + +# Get/Generate the Dashboard Model +if [ $# -eq 0 ]; then + model=Experimental +else + model=$1 +fi + +module reset +module unload netcdf +module swap intel pgi/20.4 +module swap mpt mpt/2.22 +module load git/2.22.0 +module load cmake/3.18.2 +module load netcdf-mpi/4.7.3 +module load pnetcdf/1.12.1 + +export CC=mpicc +export FC=mpif90 +export MPI_TYPE_DEPTH=24 +export PIO_DASHBOARD_ROOT=/glade/u/home/jedwards/sandboxes/dashboard +export PIO_COMPILER_ID=PGI-`$CC --version | head -n 2 | tail -n 1 | cut -d' ' -f4` + +if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then + mkdir "$PIO_DASHBOARD_ROOT" +fi +cd "$PIO_DASHBOARD_ROOT" + +if [ ! -d src ]; then + git clone https://github.com/PARALLELIO/ParallelIO src +fi +cd src +git checkout master +git pull origin master + +ctest -S CTestScript.cmake,${model} -VV diff --git a/ParallelIO/ctest/runctest-alcf.sh b/ParallelIO/ctest/runctest-alcf.sh new file mode 100755 index 0000000000..6b5fa20f1c --- /dev/null +++ b/ParallelIO/ctest/runctest-alcf.sh @@ -0,0 +1,42 @@ +#!/bin/sh +#============================================================================== +# +# This script defines how to run CTest on the Argonne Leadership Computing +# Facility systems (mira/cetus/vesta/cooley). +# +# This assumes the CTest model name (e.g., "Nightly") is passed to it when +# run. +# +#============================================================================== + +# Get the CTest script directory +scrdir=$1 + +# Get the CTest model name +model=$2 + +# Write QSUB submission script with the test execution command +echo "#!/bin/sh" > runctest.sh +echo "CTESTCMD=`which ctest`" >> runctest.sh +echo "\$CTESTCMD -S ${scrdir}/CTestScript-Test.cmake,${model} -V" >> runctest.sh + +# Make the QSUB script executable +chmod +x runctest.sh + +# Submit the job to the queue +jobid=`qsub -t 20 -n 4 --proccount 4 \ + --env PIO_DASHBOARD_SITE=$PIO_DASHBOARD_SITE \ + --env PIO_DASHBOARD_BUILD_NAME=$PIO_DASHBOARD_BUILD_NAME \ + --env PIO_DASHBOARD_SOURCE_DIR=$PIO_DASHBOARD_SOURCE_DIR \ + --env PIO_DASHBOARD_BINARY_DIR=$PIO_DASHBOARD_BINARY_DIR \ + --mode script runctest.sh` + +# Wait for the job to complete before exiting +while true; do + status=`qstat $jobid` + if [ "$status" == "" ]; then + break + else + sleep 10 + fi +done diff --git a/ParallelIO/ctest/runctest-anlworkstation.sh b/ParallelIO/ctest/runctest-anlworkstation.sh new file mode 100755 index 0000000000..9718a83480 --- /dev/null +++ b/ParallelIO/ctest/runctest-anlworkstation.sh @@ -0,0 +1,18 @@ +#!/bin/sh +#============================================================================== +# +# This script defines how to run CTest on the Argonne Linux workstations. +# +# This assumes the CTest model name (e.g., "Nightly") is passed to it when +# run. 
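All of the runctest-*.sh scripts share this two-argument interface: the CTest script directory comes first and the dashboard model second. A typical call from the top-level CTest machinery would look like (paths illustrative):

    ./runctest-anlworkstation.sh /path/to/ParallelIO/ctest Nightly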
+# +#============================================================================== + +# Get the CTest script directory +scrdir=$1 + +# Get the CTest model name +model=$2 + +# Run the "ctest" command in another process +ctest -S ${scrdir}/CTestScript-Test.cmake,${model} -V diff --git a/ParallelIO/ctest/runctest-cgd.sh b/ParallelIO/ctest/runctest-cgd.sh new file mode 100755 index 0000000000..bbd31ccf5d --- /dev/null +++ b/ParallelIO/ctest/runctest-cgd.sh @@ -0,0 +1,45 @@ +#!/bin/sh +#============================================================================== +# +# This script defines how to run CTest on the NCAR CGD local cluster +# Hobart. +# +# This assumes the CTest model name (e.g., "Nightly") is passed to it when +# run. +# +#============================================================================== + +# Get the CTest script directory +scrdir=$1 + +# Get the CTest model name +model=$2 + +# Write QSUB submission script with the test execution command +echo "#!/bin/sh" > runctest.sh +echo "export PIO_DASHBOARD_BUILD_NAME=${PIO_DASHBOARD_BUILD_NAME}" >> runctest.sh +echo "export PIO_DASHBOARD_SOURCE_DIR=${PIO_DASHBOARD_BINARY_DIR}/../src/" >> runctest.sh +echo "export PIO_DASHBOARD_BINARY_DIR=${PIO_DASHBOARD_BINARY_DIR}" >> runctest.sh +echo "export PIO_DASHBOARD_SITE=cgd-${HOSTNAME}" >> runctest.sh + +echo "CTESTCMD=`which ctest`" >> runctest.sh +echo "\$CTESTCMD -S ${scrdir}/CTestScript-Test.cmake,${model} -V" >> runctest.sh + +# Make the QSUB script executable +chmod +x runctest.sh + +# Submit the job to the queue +jobid=`/usr/local/bin/qsub -l nodes=1:ppn=8 runctest.sh -q short` + +# Wait for the job to complete before exiting +while true; do + status=`/usr/local/bin/qstat $jobid` + echo $status + if [ "$status" == "" ]; then + break + else + sleep 10 + fi +done + +exit 0 diff --git a/ParallelIO/ctest/runctest-ncsa.sh b/ParallelIO/ctest/runctest-ncsa.sh new file mode 100755 index 0000000000..c3cd75e300 --- /dev/null +++ b/ParallelIO/ctest/runctest-ncsa.sh @@ -0,0 +1,39 @@ +#!/bin/sh +#============================================================================== +# +# This script defines how to run CTest on the National Center for +# Supercomputing Applications system (blue waters). +# +# This assumes the CTest model name (e.g., "Nightly") is passed to it when +# run. 
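The batch-queue variants of these scripts all use the same submit-and-poll idiom: write a small job script, submit it, then loop until the scheduler no longer reports the job. Reduced to its core (a sketch; assumes a PBS-style qstat that prints nothing for completed jobs):

    jobid=`qsub runctest.pbs`
    # poll until the job disappears from the queue
    while [ -n "`qstat $jobid`" ]; do
        sleep 10
    done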
+# +#============================================================================== + +# Get the CTest script directory +scrdir=$1 + +# Get the CTest model name +model=$2 + +# Write QSUB submission script with the test execution command +echo "#!/bin/sh" > runctest.pbs +echo "#PBS -q debug" >> runctest.pbs +echo "#PBS -l mppwidth=24" >> runctest.pbs +echo "#PBS -l walltime=00:20:00" >> runctest.pbs +echo "#PBS -v PIO_DASHBOARD_SITE,PIO_DASHBOARD_BUILD_NAME,PIO_DASHBOARD_SOURCE_DIR,PIO_DASHBOARD_BINARY_DIR" >> runctest.pbs +echo "cd \$PBS_O_WORKDIR" >> runctest.pbs +echo "CTEST_CMD=`which ctest`" >> runctest.pbs +echo "\$CTEST_CMD -S ${scrdir}/CTestScript-Test.cmake,${model} -V" >> runctest.pbs + +# Submit the job to the queue +jobid=`qsub runctest.pbs` + +# Wait for the job to complete before exiting +while true; do + status=`qstat $jobid` + if [ "$status" == "" ]; then + break + else + sleep 10 + fi +done diff --git a/ParallelIO/ctest/runctest-nersc.sh b/ParallelIO/ctest/runctest-nersc.sh new file mode 100755 index 0000000000..a84d26bbeb --- /dev/null +++ b/ParallelIO/ctest/runctest-nersc.sh @@ -0,0 +1,57 @@ +#!/bin/sh +#============================================================================== +# +# This script defines how to run CTest on the National Energy Research +# Scientific Computing Center systems (edison/cori). +# +# This assumes the CTest model name (e.g., "Nightly") is passed to it when +# run. +# +#============================================================================== + +# Get the CTest script directory +scrdir=$1 + +# Get the CTest model name +model=$2 + +# Write QSUB submission script with the test execution command +echo "#!/bin/sh" > runctest.slurm +echo "#SBATCH --partition debug" >> runctest.slurm +echo "#SBATCH --nodes=1" >> runctest.slurm +case "$NERSC_HOST" in + edison) + echo "#SBATCH --ntasks-per-node=32" >> runctest.slurm + ;; + cori) + echo "#SBATCH --ntasks-per-node=68" >> runctest.slurm + echo "#SBATCH -C knl" >> runctest.slurm + ;; +esac + +echo "#SBATCH --time=01:00:00" >> runctest.slurm + +echo "#SBATCH --export PIO_DASHBOARD_SITE,PIO_DASHBOARD_BUILD_NAME,PIO_DASHBOARD_SOURCE_DIR,PIO_DASHBOARD_BINARY_DIR" >> runctest.slurm +#echo "cd \$PBS_O_WORKDIR" >> runctest.pbs +echo "CTEST_CMD=`which ctest`" >> runctest.slurm +echo "\$CTEST_CMD -S ${scrdir}/CTestScript-Test.cmake,${model} -V" >> runctest.slurm +chmod +x runctest.slurm +# Submit the job to the queue +#jobid=`sbatch runctest.slurm| egrep -o -e "\b[0-9]+$"` +case "$NERSC_HOST" in + edison) + salloc -N 1 ./runctest.slurm + ;; + cori) + salloc -N 1 -C knl ./runctest.slurm + ;; +esac +# Wait for the job to complete before exiting +#while true; do +# status=`squeue -j $jobid` +# if [ "$status" == "" ]; then +# break +# else +# sleep 10 +# fi +#done diff --git a/ParallelIO/ctest/runctest-nwscla.sh b/ParallelIO/ctest/runctest-nwscla.sh new file mode 100755 index 0000000000..d3e252317d --- /dev/null +++ b/ParallelIO/ctest/runctest-nwscla.sh @@ -0,0 +1,44 @@ +#!/bin/sh +#============================================================================== +# +# This script defines how to run CTest on the NCAR Wyoming Supercomputing +# Center systems (cheyenne/laramie). +# +# This assumes the CTest model name (e.g., "Nightly") is passed to it when +# run. 
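The NERSC script above is the outlier among these runners: the sbatch submission is commented out, and the generated runctest.slurm is instead executed inside an interactive allocation sized per host:

    salloc -N 1 ./runctest.slurm          # edison
    salloc -N 1 -C knl ./runctest.slurm   # cori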
+#
+#==============================================================================
+
+# Get the CTest script directory
+scrdir=$1
+
+# Get the CTest model name
+model=$2
+
+# Write QSUB submission script with the test execution command
+echo "#!/bin/sh" > runctest.sh
+echo "#PBS -l walltime=01:00:00" >> runctest.sh
+echo "#PBS -l select=1:ncpus=8:mpiprocs=8" >> runctest.sh
+echo "#PBS -A P93300606" >> runctest.sh
+echo "#PBS -q regular" >> runctest.sh
+echo "export PIO_DASHBOARD_SITE=nwscla-${HOSTNAME}" >> runctest.sh
+echo "CTESTCMD=`which ctest`" >> runctest.sh
+echo "\$CTESTCMD -S ${scrdir}/CTestScript-Test.cmake,${model} -V" >> runctest.sh
+
+# Make the QSUB script executable
+chmod +x runctest.sh
+
+# Submit the job to the queue
+jobid=`qsub -l walltime=01:00:00 runctest.sh`
+
+# Wait for the job to complete before exiting
+while true; do
+    qstat $jobid
+    if [ $? -eq 0 ]; then
+        sleep 30
+    else
+        break;
+    fi
+done
+
+exit 0
diff --git a/ParallelIO/ctest/runctest-unknown.sh b/ParallelIO/ctest/runctest-unknown.sh
new file mode 100755
index 0000000000..01ba66403c
--- /dev/null
+++ b/ParallelIO/ctest/runctest-unknown.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+#==============================================================================
+#
+# This script defines how to run CTest on the default ("unknown") machine.
+#
+# This assumes the CTest model name (e.g., "Nightly") is passed to it when
+# run.
+#
+#==============================================================================
+
+# Get the CTest script directory
+scrdir=$1
+
+# Get the dashboard model name
+model=$2
+
+# Run the "ctest" command in another process
+ctest -S ${scrdir}/CTestScript-Test.cmake,${model} -V
diff --git a/ParallelIO/doc/CMakeFiles/3.2.3/CMakeSystem.cmake b/ParallelIO/doc/CMakeFiles/3.2.3/CMakeSystem.cmake
new file mode 100644
index 0000000000..c94e370e1b
--- /dev/null
+++ b/ParallelIO/doc/CMakeFiles/3.2.3/CMakeSystem.cmake
@@ -0,0 +1,15 @@
+set(CMAKE_HOST_SYSTEM "Linux-3.10.0-123.el7.x86_64")
+set(CMAKE_HOST_SYSTEM_NAME "Linux")
+set(CMAKE_HOST_SYSTEM_VERSION "3.10.0-123.el7.x86_64")
+set(CMAKE_HOST_SYSTEM_PROCESSOR "x86_64")
+
+
+
+set(CMAKE_SYSTEM "Linux-3.10.0-123.el7.x86_64")
+set(CMAKE_SYSTEM_NAME "Linux")
+set(CMAKE_SYSTEM_VERSION "3.10.0-123.el7.x86_64")
+set(CMAKE_SYSTEM_PROCESSOR "x86_64")
+
+set(CMAKE_CROSSCOMPILING "FALSE")
+
+set(CMAKE_SYSTEM_LOADED 1)
diff --git a/ParallelIO/doc/CMakeFiles/cmake.check_cache b/ParallelIO/doc/CMakeFiles/cmake.check_cache
new file mode 100644
index 0000000000..3dccd73172
--- /dev/null
+++ b/ParallelIO/doc/CMakeFiles/cmake.check_cache
@@ -0,0 +1 @@
+# This file is generated by cmake for dependency checking of the CMakeCache.txt file
diff --git a/ParallelIO/doc/CMakeLists.txt b/ParallelIO/doc/CMakeLists.txt
new file mode 100644
index 0000000000..5c7894b599
--- /dev/null
+++ b/ParallelIO/doc/CMakeLists.txt
@@ -0,0 +1,43 @@
+#==============================================================================
+#
+# API documentation with Doxygen
+#
+#==============================================================================
+
+find_package(Doxygen)
+
+if(DOXYGEN_FOUND)
+  # This supports the build with/without internal documentation.
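The conditional below switches Doxygen's input between the entire clib source directory (internal documentation) and a curated list of user-facing files. A typical out-of-source documentation build under this CMakeLists.txt might look like (a sketch; the doc target is defined at the end of the file):

    cmake -DPIO_INTERNAL_DOC=ON ..
    make doc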
+ if (PIO_INTERNAL_DOC) + SET(C_SRC_FILES "${CMAKE_CURRENT_SOURCE_DIR}/../src/clib") + else () + SET(C_SRC_FILES + "${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pioc.c \\ +${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pio_nc4.c \\ +${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pio_darray.c \\ +${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pio_get_nc.c \\ +${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pio_put_nc.c \\ +${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pioc_async.c \\ +${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pio_file.c \\ +${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pio.h \\ +${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pio_nc.c \\ +${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/topology.c \\ +${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pioc_sc.c" ) + endif () + + # Process the Doxyfile using options set during configure. + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in + ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY) + + # Copy necessary files. + add_custom_target(doc + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/customdoxygen.css + ${CMAKE_CURRENT_BINARY_DIR} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/DoxygenLayout.xml + ${CMAKE_CURRENT_BINARY_DIR} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/doxygen.sty + ${CMAKE_CURRENT_BINARY_DIR} + COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Generating API documentation with Doxygen" VERBATIM) +endif(DOXYGEN_FOUND) diff --git a/ParallelIO/doc/Doxyfile.in b/ParallelIO/doc/Doxyfile.in new file mode 100644 index 0000000000..713d3b56aa --- /dev/null +++ b/ParallelIO/doc/Doxyfile.in @@ -0,0 +1,2389 @@ +# Doxyfile 1.8.9.1 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = PIO + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. 
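Because Doxyfile.in is processed by configure_file(... @ONLY) in the CMake build, and by AC_CONFIG_FILES([doc/Doxyfile]) in the autotools build, the @VAR@ placeholders below are expanded at configure time while everything else passes through unchanged. For example (the substituted version number shown is illustrative):

    # in Doxyfile.in:
    PROJECT_NUMBER = @VERSION_MAJOR@.@VERSION_MINOR@.@VERSION_PATCH@
    # in the generated Doxyfile:
    PROJECT_NUMBER = 2.5.4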
+ +PROJECT_NUMBER = @VERSION_MAJOR@.@VERSION_MINOR@.@VERSION_PATCH@ + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = .. + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. 
Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = NO + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = YES + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. 
The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = YES + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: +# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: +# Fortran. In the later case the parser tries to guess whether the code is fixed +# or free formatted code, this is the default for Fortran type files), VHDL. 
+# For instance, to make doxygen treat .inc files as Fortran files (default is
+# PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING = f90=Fortran
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibility issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC = NO
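+
+# Editor's illustration (a sketch, not part of the original configuration):
+# if DISTRIBUTE_GROUP_DOC were set to YES, documenting the first member of a
+# group would cover the remaining members, e.g.
+#
+#   //@{
+#   /** MPI rank of this task (documented once, reused for all members). */
+#   int comp_rank;
+#   int io_rank;
+#   //@}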
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS = NO
+
+# When the TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE and EXTRACT_STATIC tags, respectively, are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES = NO
+
+# This flag is only useful for Objective-C code. If set to YES, local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO, only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous
+# namespaces are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO, these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO, these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES, upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES = NO
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES, the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST = YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using the \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output is used as
+# the file version. For an example see the documentation.
+
+FILE_VERSION_FILTER =
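+
+# Editor's illustration (hypothetical, not part of the original
+# configuration): in a git checkout, a version filter could show each file's
+# last commit hash, invoked by doxygen as "<command> <input-file>":
+#
+#   FILE_VERSION_FILTER = "git log -n 1 --pretty=format:%h --"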
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE = @CMAKE_CURRENT_SOURCE_DIR@/DoxygenLayout.xml
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS = NO
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO, doxygen will only warn about wrong or incomplete
+# parameter documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER).
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE = doxywarn.log
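+
+# Editor's illustration (hypothetical file, line and message; not part of the
+# original configuration): with the WARN_FORMAT above, a typical entry in
+# doxywarn.log would look like
+#
+#   pio_file.c:142: warning: argument 'iosysid' of command @param is not
+#   found in the argument list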
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
+
+INPUT = @CMAKE_CURRENT_SOURCE_DIR@/../doc/source \
+        @CMAKE_CURRENT_SOURCE_DIR@/../examples/c \
+        @CMAKE_CURRENT_SOURCE_DIR@/../examples/f03 \
+        @FORTRAN_SRC_FILES@ \
+        @C_SRC_FILES@
+
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested: *.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
+
+FILE_PATTERNS = *.c \
+                *.h \
+                *.inc \
+                *.dox \
+                *.f90 \
+                *.F90 \
+                *.txt
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE = gptl \
+          @CMAKE_BINARY_DIR@/src/flib/*.dir \
+          @CMAKE_BINARY_DIR@/src/flib/genf90 \
+          ../src/clib/uthash.h \
+          _UNUSED_
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH = ./source/example \
+               ../examples/basic
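+
+# Editor's illustration (the file name is hypothetical; not part of the
+# original configuration): with EXAMPLE_PATH set above, a documentation
+# comment can pull a complete example file into the generated output:
+#
+#   /**
+#    * A complete write example:
+#    * \include example1.c
+#    */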
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH = ./images
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+#   <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERNS (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on, for instance,
+# GitHub and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments.
+# Normal C, C++ and Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen's built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify a prefix (or a list of prefixes)
+# that should be ignored while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output.
+# The default value is: YES.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = docs
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML, the header file must include any scripts and style sheets
+# that doxygen needs, which depend on the configuration options used (e.g. the
+# setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET =
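+
+# Editor's note (not part of the original configuration): the default header,
+# footer and style sheet mentioned above would typically be generated as a
+# starting point with
+#
+#   doxygen -w html new_header.html new_footer.html new_stylesheet.css
+#
+# and then edited before being referenced from HTML_HEADER or HTML_FOOTER.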
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+#HTML_EXTRA_STYLESHEET = ../../docs/customdoxygen.css
+HTML_EXTRA_STYLESHEET = customdoxygen.css
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 is
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, the value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on.
+# Doxygen will expand the tree to such a level that at most the specified
+# number of entries are visible (unless a fully collapsed tree already exceeds
+# this amount). So setting the number of entries to 1 will produce a fully
+# collapsed tree by default. 0 is a special value representing an infinite
+# number of entries and will result in a fully expanded tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION =
+
+# The GENERATE_CHI flag controls whether a separate .chi index file is generated
+# (YES) or included in the master .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated that, together with the HTML files, form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files need
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying, Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 250
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes take effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
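+
+# Editor's note (not part of the original configuration): per the
+# MATHJAX_RELPATH description below, a local MathJax copy installed at the
+# same level as the HTML output directory would be configured as
+#
+#   MATHJAX_RELPATH = ../mathjax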
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, in which
+# case enabling SERVER_BASED_SEARCH may provide a better solution. It is
+# possible to search using the keyboard; to jump to the search box use
+# <access key> + S (what the <access key> is depends on the OS and browser,
+# but it is typically <CTRL>, <ALT>/