diff --git a/.flake8 b/.flake8 index e8e8bfd52..63ccdc539 100644 --- a/.flake8 +++ b/.flake8 @@ -9,10 +9,10 @@ ignore = W503, E203, E704 extend-exclude = .venv build per-file-ignores = # Autogenerated section - psycopg/psycopg/errors.py: E125, E128, E302 + gaussdb/gaussdb/errors.py: E125, E128, E302 # Allow concatenated string literals from async_to_sync - psycopg_pool/psycopg_pool/pool.py: E501 + gaussdb_pool/gaussdb_pool/pool.py: E501 # Pytest's importorskip() getting in the way tests/types/test_numpy.py: E402 diff --git a/.github/workflows/3rd-party-tests.yml b/.github/workflows/3rd-party-tests.yml index 8dd67a1c7..9a84c57aa 100644 --- a/.github/workflows/3rd-party-tests.yml +++ b/.github/workflows/3rd-party-tests.yml @@ -42,8 +42,8 @@ jobs: pip_sqlalchemy: sqlalchemy>=2 env: - PSYCOPG_IMPL: ${{ matrix.impl }} - DEPS: ./psycopg pytest pytest-xdist greenlet + GAUSSDB_IMPL: ${{ matrix.impl }} + DEPS: ./gaussdb pytest pytest-xdist greenlet services: postgresql: @@ -78,10 +78,10 @@ jobs: CREATE EXTENSION hstore; HERE - - name: Include psycopg-c to the packages to install + - name: Include gaussdb-c to the packages to install if: ${{ matrix.impl == 'c' }} run: | - echo "DEPS=$DEPS ./psycopg_c" >> $GITHUB_ENV + echo "DEPS=$DEPS ./gaussdb_c" >> $GITHUB_ENV - name: Install pycopg packages run: pip install $DEPS @@ -108,7 +108,7 @@ jobs: - name: Run sqlalchemy tests env: - URL: postgresql+psycopg://postgres:password@127.0.0.1/test + URL: postgresql+gaussdb://postgres:password@127.0.0.1/test working-directory: sa_home/sa run: pytest -n 2 -q --dburi $URL --backend-only --dropfirst --color=yes --dbdriver psycopg_async @@ -150,7 +150,7 @@ jobs: python-version: "3.12" env: - DEPS: ./psycopg ./psycopg_pool + DEPS: ./gaussdb ./gaussdb_pool services: postgresql: @@ -182,10 +182,10 @@ jobs: SELECT version(); HERE - - name: Include psycopg-c to the packages to install + - name: Include gaussdb-c to the packages to install if: ${{ matrix.impl == 'c' }} run: | - echo "DEPS=$DEPS ./psycopg_c" >> $GITHUB_ENV + echo "DEPS=$DEPS ./gaussdb_c" >> $GITHUB_ENV - name: Install pycopg packages run: pip install $DEPS diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 988c1c020..f68e8c9de 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -51,7 +51,7 @@ jobs: wget -O /tmp/GaussDB_driver.zip https://dbs-download.obs.cn-north-1.myhuaweicloud.com/GaussDB/1730887196055/GaussDB_driver.zip unzip /tmp/GaussDB_driver.zip -d /tmp/ && rm -rf /tmp/GaussDB_driver.zip \cp /tmp/GaussDB_driver/Centralized/Hce2_X86_64/GaussDB-Kernel*64bit_Python.tar.gz /tmp/ - tar -zxvf /tmp/GaussDB-Kernel*64bit_Python.tar.gz -C /tmp/ && rm -rf /tmp/GaussDB-Kernel*64bit_Python.tar.gz && rm -rf /tmp/psycopg2 && rm -rf /tmp/GaussDB_driver + tar -zxvf /tmp/GaussDB-Kernel*64bit_Python.tar.gz -C /tmp/ && rm -rf /tmp/GaussDB-Kernel*64bit_Python.tar.gz && rm -rf /tmp/_GaussDB && rm -rf /tmp/GaussDB_driver echo /tmp/lib | sudo tee /etc/ld.so.conf.d/gauss-libpq.conf sudo sed -i '1s|^|/tmp/lib\n|' /etc/ld.so.conf sudo ldconfig @@ -62,8 +62,9 @@ jobs: source venv/bin/activate python -m pip install --upgrade pip pip install -r requirements.txt - pip install -e "./psycopg[dev,test]" - pip install -e ./psycopg_pool + pip install ./tools/isort-gaussdb/ + pip install -e "./gaussdb[dev,test]" + pip install -e ./gaussdb_pool - name: Wait for openGauss to be ready @@ -90,9 +91,9 @@ jobs: - name: Run tests env: - PYTHONPATH: ./psycopg:./psycopg_pool - PSYCOPG_IMPL: python - PSYCOPG_TEST_DSN: "host=127.0.0.1 port=5432 dbname=test 
user=root password=${{ secrets.OPENGAUSS_PASSWORD }} " + PYTHONPATH: ./gaussdb:./gaussdb_pool + GAUSSDB_IMPL: python + GAUSSDB_TEST_DSN: "host=127.0.0.1 port=5432 dbname=test user=root password=${{ secrets.OPENGAUSS_PASSWORD }} " run: | source venv/bin/activate pytest -s -v diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index b94c3996a..5822a1c24 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -3,7 +3,7 @@ name: Build documentation on: # push: # branches: - # # This should match the DOC3_BRANCH value in the psycopg-website Makefile + # # This should match the DOC3_BRANCH value in the gaussdb-website Makefile # - master pull_request: @@ -18,6 +18,6 @@ jobs: - name: Trigger docs build uses: peter-evans/repository-dispatch@v3 with: - repository: psycopg/psycopg-website - event-type: psycopg3-commit + repository: gaussdb/gaussdb-website + event-type: gaussdb-commit token: ${{ secrets.ACCESS_TOKEN }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 82d74cc0d..5768d161d 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -27,7 +27,9 @@ jobs: - name: install packages to tests run: | - pip install ./psycopg[dev,test] + pip install ./tools/isort-gaussdb/ + pip install ./gaussdb[dev,test] + pip install ./gaussdb_pool pip install types-polib pip install pre-commit @@ -41,7 +43,9 @@ jobs: - name: Install packages for async_to_sync run: | - pip install ./psycopg[dev,test] + pip install ./tools/isort-gaussdb/ + pip install ./gaussdb[dev,test] + pip install ./gaussdb_pool pip install types-polib - name: Check for sync/async inconsistencies @@ -51,7 +55,12 @@ jobs: run: sudo apt-get install -y libgeos-dev - name: Install Python packages to generate docs - run: pip install ./psycopg[docs] ./psycopg_pool + run: | + pip install furo + pip install ./tools/isort-gaussdb/ + pip install ./gaussdb[dev,test] + pip install ./gaussdb_pool + pip install types-polib - name: Check documentation run: sphinx-build -W -T -b html docs docs/_build/html diff --git a/.github/workflows/packages-bin.yml b/.github/workflows/packages-bin.yml index 2b0ced974..c0b3a8fd3 100644 --- a/.github/workflows/packages-bin.yml +++ b/.github/workflows/packages-bin.yml @@ -3,7 +3,7 @@ name: Build binary packages # Note: Libpq is currently built from source on most platforms and the build # artifacts are cached across pipeline runs. 
# -# You can see the caches at https://github.com/psycopg/psycopg/actions/caches +# You can see the caches at https://github.com/gaussdb/gaussdb/actions/caches # # You can delete a cache using: # @@ -11,7 +11,7 @@ name: Build binary packages # -H "Accept: application/vnd.github+json" # -H "Authorization: Bearer $GITHUB_TOKEN" # -H "X-GitHub-Api-Version: 2022-11-28" -# "https://api.github.com/repos/psycopg/psycopg/actions/caches?key=libpq-manylinux-ppc64le-17.2-3.4.0" +# "https://api.github.com/repos/gaussdb/gaussdb/actions/caches?key=libpq-manylinux-ppc64le-17.2-3.4.0" # # ref: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#delete-github-actions-caches-for-a-repository-using-a-cache-key @@ -70,7 +70,7 @@ jobs: - name: Build wheels uses: pypa/cibuildwheel@v2.23.2 with: - package-dir: psycopg_binary + package-dir: gaussdb_binary env: CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014 CIBW_MANYLINUX_I686_IMAGE: manylinux2014 @@ -82,19 +82,19 @@ jobs: CIBW_REPAIR_WHEEL_COMMAND: >- ./tools/ci/strip_wheel.sh {wheel} && auditwheel repair -w {dest_dir} {wheel} - CIBW_TEST_REQUIRES: ./psycopg[test] ./psycopg_pool + CIBW_TEST_REQUIRES: ./gaussdb[test] ./gaussdb_pool CIBW_TEST_COMMAND: >- pytest {project}/tests -m 'not slow and not flakey' --color yes CIBW_ENVIRONMENT_PASS_LINUX: LIBPQ_VERSION OPENSSL_VERSION CIBW_ENVIRONMENT: >- - PSYCOPG_IMPL=binary - PSYCOPG_TEST_DSN='host=172.17.0.1 user=postgres' + GAUSSDB_IMPL=binary + GAUSSDB_TEST_DSN='host=172.17.0.1 user=postgres' PGPASSWORD=password LIBPQ_BUILD_PREFIX=/host/tmp/libpq.build PATH="$LIBPQ_BUILD_PREFIX/bin:$PATH" LD_LIBRARY_PATH="$LIBPQ_BUILD_PREFIX/lib:$LIBPQ_BUILD_PREFIX/lib64" - PSYCOPG_TEST_WANT_LIBPQ_BUILD=${{ env.LIBPQ_VERSION }} - PSYCOPG_TEST_WANT_LIBPQ_IMPORT=${{ env.LIBPQ_VERSION }} + GAUSSDB_TEST_WANT_LIBPQ_BUILD=${{ env.LIBPQ_VERSION }} + GAUSSDB_TEST_WANT_LIBPQ_IMPORT=${{ env.LIBPQ_VERSION }} - uses: actions/upload-artifact@v4 with: @@ -144,23 +144,23 @@ jobs: - name: Build wheels uses: pypa/cibuildwheel@v2.23.2 with: - package-dir: psycopg_binary + package-dir: gaussdb_binary env: CIBW_BUILD: ${{matrix.pyver}}-macosx_${{matrix.arch}} CIBW_ARCHS_MACOS: ${{matrix.arch}} MACOSX_ARCHITECTURE: ${{matrix.arch}} CIBW_BEFORE_ALL_MACOS: ./tools/ci/wheel_macos_before_all.sh - CIBW_TEST_REQUIRES: ./psycopg[test] ./psycopg_pool + CIBW_TEST_REQUIRES: ./gaussdb[test] ./gaussdb_pool CIBW_TEST_COMMAND: >- pytest {project}/tests -m 'not slow and not flakey' --color yes CIBW_ENVIRONMENT: >- PG_VERSION=17 - PSYCOPG_IMPL=binary - PSYCOPG_TEST_DSN='dbname=postgres' + GAUSSDB_IMPL=binary + GAUSSDB_TEST_DSN='dbname=postgres' LIBPQ_BUILD_PREFIX=/tmp/libpq.build PATH="$LIBPQ_BUILD_PREFIX/bin:$PATH" - PSYCOPG_TEST_WANT_LIBPQ_BUILD=">= ${{env.LIBPQ_VERSION}}" - PSYCOPG_TEST_WANT_LIBPQ_IMPORT=">= ${{env.LIBPQ_VERSION}}" + GAUSSDB_TEST_WANT_LIBPQ_BUILD=">= ${{env.LIBPQ_VERSION}}" + GAUSSDB_TEST_WANT_LIBPQ_IMPORT=">= ${{env.LIBPQ_VERSION}}" - name: Upload artifacts uses: actions/upload-artifact@v4 @@ -212,7 +212,7 @@ jobs: - name: Build wheels uses: pypa/cibuildwheel@v2.23.2 with: - package-dir: psycopg_binary + package-dir: gaussdb_binary env: VCPKG_BINARY_SOURCES: "clear;x-gha,readwrite" # cache vcpkg CIBW_BUILD: ${{matrix.pyver}}-${{matrix.arch}} @@ -222,14 +222,14 @@ jobs: delvewheel repair -w {dest_dir} --add-path="${{ steps.libdir.outputs.EXTRA_LIB_DIR }}" --no-mangle "libiconv-2.dll;libwinpthread-1.dll" {wheel} - CIBW_TEST_REQUIRES: ./psycopg[test] ./psycopg_pool + CIBW_TEST_REQUIRES: ./gaussdb[test] ./gaussdb_pool CIBW_TEST_COMMAND: >- 
pytest {project}/tests -m "not slow and not flakey" --color yes CIBW_ENVIRONMENT_WINDOWS: >- - PSYCOPG_IMPL=binary - PSYCOPG_TEST_DSN="host=127.0.0.1 user=postgres" - PSYCOPG_TEST_WANT_LIBPQ_BUILD=${{env.LIBPQ_VERSION}} - PSYCOPG_TEST_WANT_LIBPQ_IMPORT=${{env.LIBPQ_VERSION}} + GAUSSDB_IMPL=binary + GAUSSDB_TEST_DSN="host=127.0.0.1 user=postgres" + GAUSSDB_TEST_WANT_LIBPQ_BUILD=${{env.LIBPQ_VERSION}} + GAUSSDB_TEST_WANT_LIBPQ_IMPORT=${{env.LIBPQ_VERSION}} - uses: actions/upload-artifact@v4 with: @@ -249,7 +249,7 @@ jobs: - name: Merge Artifacts uses: actions/upload-artifact/merge@v4 with: - name: psycopg-binary-artifact + name: gaussdb-binary-artifact delete-merged: true # }}} diff --git a/.github/workflows/packages-pool.yml b/.github/workflows/packages-pool.yml index 41ffc0354..8299dd82c 100644 --- a/.github/workflows/packages-pool.yml +++ b/.github/workflows/packages-pool.yml @@ -14,8 +14,8 @@ jobs: fail-fast: false matrix: include: - - {package: psycopg_pool, format: sdist} - - {package: psycopg_pool, format: wheel} + - {package: gaussdb_pool, format: sdist} + - {package: gaussdb_pool, format: wheel} steps: - uses: actions/checkout@v4 @@ -31,12 +31,12 @@ jobs: run: python -m build -o dist --${{ matrix.format }} ${{ matrix.package }} - name: Install the Python pool package and test requirements - run: pip install psycopg[test] dist/* + run: pip install gaussdb[test] dist/* - name: Test the package run: pytest -m 'pool and not slow and not flakey' --color yes env: - PSYCOPG_TEST_DSN: "host=127.0.0.1 user=postgres" + GAUSSDB_TEST_DSN: "host=127.0.0.1 user=postgres" PGPASSWORD: password - uses: actions/upload-artifact@v4 @@ -66,5 +66,5 @@ jobs: - name: Merge Artifacts uses: actions/upload-artifact/merge@v4 with: - name: psycopg-pool-artifact + name: gaussdb-pool-artifact delete-merged: true diff --git a/.github/workflows/packages-src.yml b/.github/workflows/packages-src.yml index 1adbca78d..eb77757f5 100644 --- a/.github/workflows/packages-src.yml +++ b/.github/workflows/packages-src.yml @@ -15,9 +15,9 @@ jobs: fail-fast: false matrix: include: - - {package: psycopg, format: sdist, impl: python} - - {package: psycopg, format: wheel, impl: python} - - {package: psycopg_c, format: sdist, impl: c} + - {package: gaussdb, format: sdist, impl: python} + - {package: gaussdb, format: wheel, impl: python} + - {package: gaussdb_c, format: sdist, impl: c} steps: - uses: actions/checkout@v4 @@ -33,18 +33,18 @@ jobs: run: python -m build -o dist --${{ matrix.format }} ${{ matrix.package }} - name: Install the Python package and test requirements - run: pip install `ls dist/*`[test] ./psycopg_pool - if: ${{ matrix.package == 'psycopg' }} + run: pip install `ls dist/*`[test] ./gaussdb_pool + if: ${{ matrix.package == 'gaussdb' }} - name: Install the C package and test requirements - run: pip install dist/* ./psycopg[test] ./psycopg_pool - if: ${{ matrix.package == 'psycopg_c' }} + run: pip install dist/* ./gaussdb[test] ./gaussdb_pool + if: ${{ matrix.package == 'gaussdb_c' }} - name: Test the sdist package run: pytest -m 'not slow and not flakey' --color yes env: - PSYCOPG_IMPL: ${{ matrix.impl }} - PSYCOPG_TEST_DSN: "host=127.0.0.1 user=postgres" + GAUSSDB_IMPL: ${{ matrix.impl }} + GAUSSDB_TEST_DSN: "host=127.0.0.1 user=postgres" PGPASSWORD: password - uses: actions/upload-artifact@v4 @@ -73,5 +73,5 @@ jobs: - name: Merge Artifacts uses: actions/upload-artifact/merge@v4 with: - name: psycopg-src-artifact + name: gaussdb-src-artifact delete-merged: true diff --git a/.github/workflows/tests.yml 
b/.github/workflows/tests.yml index 81d55bf69..2503cd637 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -7,8 +7,8 @@ on: # branches: # - "*" pull_request: - schedule: - - cron: '48 6 * * *' + # schedule: + # - cron: '48 6 * * *' concurrency: # Cancel older requests of the same workflow in the same branch. @@ -59,9 +59,9 @@ jobs: - {impl: python, python: "pypy3.10", postgres: "postgres:14"} env: - PSYCOPG_IMPL: ${{ matrix.impl }} - DEPS: ./psycopg[test] ./psycopg_pool - PSYCOPG_TEST_DSN: "host=127.0.0.1 user=postgres password=password" + GAUSSDB_IMPL: ${{ matrix.impl }} + DEPS: ./gaussdb[test] ./gaussdb_pool + GAUSSDB_TEST_DSN: "host=127.0.0.1 user=postgres password=password" MARKERS: "" steps: @@ -84,10 +84,10 @@ jobs: - name: Install the wanted libpq version run: sudo ./tools/ci/ci_install_libpq.sh ${{ matrix.libpq }} - - name: Include psycopg-c to the packages to install + - name: Include gaussdb-c to the packages to install if: ${{ matrix.impl == 'c' }} run: | - echo "DEPS=$DEPS ./psycopg_c" >> $GITHUB_ENV + echo "DEPS=$DEPS ./gaussdb_c" >> $GITHUB_ENV - name: Include gevent to the packages to install if: ${{ matrix.ext == 'gevent' }} @@ -159,9 +159,9 @@ jobs: - {impl: c, python: "3.13"} env: - PSYCOPG_IMPL: ${{ matrix.impl }} - DEPS: ./psycopg[test] ./psycopg_pool - PSYCOPG_TEST_DSN: "host=127.0.0.1 user=runner dbname=postgres" + GAUSSDB_IMPL: ${{ matrix.impl }} + DEPS: ./gaussdb[test] ./gaussdb_pool + GAUSSDB_TEST_DSN: "host=127.0.0.1 user=runner dbname=postgres" # MacOS on GitHub Actions seems particularly slow. # Don't run timing-based tests as they regularly fail. # pproxy-based tests fail too, with the proxy not coming up in 2s. @@ -195,10 +195,10 @@ jobs: echo "DYLD_LIBRARY_PATH=/opt/homebrew/opt/postgresql@${PG_VERSION}/lib/postgresql:/opt/homebrew/opt/postgresql@${PG_VERSION}/lib:$DYLD_LIBRARY_PATH" \ >> $GITHUB_ENV - - name: Include psycopg-c to the packages to install + - name: Include gaussdb-c to the packages to install if: ${{ matrix.impl == 'c' }} run: | - echo "DEPS=$DEPS ./psycopg_c" >> $GITHUB_ENV + echo "DEPS=$DEPS ./gaussdb_c" >> $GITHUB_ENV echo "PATH=/opt/homebrew/opt/postgresql@${PG_VERSION}/bin:$PATH" >> $GITHUB_ENV - name: Install Python packages @@ -222,9 +222,9 @@ jobs: - {impl: c, python: "3.9"} env: - PSYCOPG_IMPL: ${{ matrix.impl }} - DEPS: ./psycopg[test] ./psycopg_pool - PSYCOPG_TEST_DSN: "host=127.0.0.1 user=runner dbname=postgres" + GAUSSDB_IMPL: ${{ matrix.impl }} + DEPS: ./gaussdb[test] ./gaussdb_pool + GAUSSDB_TEST_DSN: "host=127.0.0.1 user=runner dbname=postgres" # MacOS on GitHub Actions seems particularly slow. # Don't run timing-based tests as they regularly fail. # pproxy-based tests fail too, with the proxy not coming up in 2s. 
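The test jobs above are driven entirely by environment variables: `GAUSSDB_IMPL` selects the implementation under test and `GAUSSDB_TEST_DSN` points the suite at a server. For contributors reproducing a job locally, a minimal smoke test along these lines can verify the environment before running the full suite; this is only a sketch, assuming the renamed `gaussdb` package keeps the `psycopg` API (`connect()`, `Connection.execute()` and the `pq.__impl__` attribute):

.. code:: python

    import os

    import gaussdb

    # The CI jobs above export GAUSSDB_TEST_DSN; fall back to a local server.
    dsn = os.environ.get("GAUSSDB_TEST_DSN", "host=127.0.0.1 user=postgres")

    with gaussdb.connect(dsn) as conn:
        # Report which implementation GAUSSDB_IMPL selected (python, c,
        # binary), assuming pq.__impl__ survives the rename.
        print("implementation:", gaussdb.pq.__impl__)
        # A trivial round trip exercises the driver, the libpq and the server.
        assert conn.execute("SELECT 1").fetchone() == (1,)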
@@ -254,10 +254,10 @@ jobs: echo "DYLD_LIBRARY_PATH=/usr/local/opt/postgresql@${PG_VERSION}/lib/postgresql:$DYLD_LIBRARY_PATH" \ >> $GITHUB_ENV - - name: Include psycopg-c to the packages to install + - name: Include gaussdb-c to the packages to install if: ${{ matrix.impl == 'c' }} run: | - echo "DEPS=$DEPS ./psycopg_c" >> $GITHUB_ENV + echo "DEPS=$DEPS ./gaussdb_c" >> $GITHUB_ENV echo "PATH=/usr/local/opt/postgresql@${PG_VERSION}/bin:$PATH" >> $GITHUB_ENV - name: Install Python packages @@ -289,9 +289,9 @@ jobs: - {impl: c, python: "3.13"} env: - PSYCOPG_IMPL: ${{ matrix.impl }} - DEPS: ./psycopg[test] ./psycopg_pool - PSYCOPG_TEST_DSN: "host=127.0.0.1 dbname=postgres" + GAUSSDB_IMPL: ${{ matrix.impl }} + DEPS: ./gaussdb[test] ./gaussdb_pool + GAUSSDB_TEST_DSN: "host=127.0.0.1 dbname=postgres" # On windows pproxy doesn't seem very happy. Also a few timing test fail. NOT_MARKERS: "timing proxy mypy" PG_VERSION: "17.4" @@ -345,9 +345,9 @@ jobs: run: | # If the wheel is not delocated, import fails with some dll not found # (but it won't tell which one). - pip wheel -v -w ./psycopg_c/dist/ ./psycopg_c/ + pip wheel -v -w ./gaussdb_c/dist/ ./gaussdb_c/ delvewheel repair --no-mangle "libiconv-2.dll;libwinpthread-1.dll" \ - -w ./wheelhouse/ psycopg_c/dist/psycopg*.whl + -w ./wheelhouse/ gaussdb_c/dist/gaussdb*.whl echo "DEPS=$DEPS $(ls ./wheelhouse/*.whl)" >> $GITHUB_ENV - name: Install Python packages @@ -380,9 +380,9 @@ jobs: - {impl: c, crdb: "latest-v24.3-build", python: "3.9", libpq: newest} - {impl: python, crdb: "latest-v23.2-build", python: "3.12"} env: - PSYCOPG_IMPL: ${{ matrix.impl }} - DEPS: ./psycopg[test] ./psycopg_pool - PSYCOPG_TEST_DSN: "host=127.0.0.1 port=26257 user=root dbname=defaultdb" + GAUSSDB_IMPL: ${{ matrix.impl }} + DEPS: ./gaussdb[test] ./gaussdb_pool + GAUSSDB_TEST_DSN: "host=127.0.0.1 port=26257 user=root dbname=defaultdb" CRDB_REPO: us-docker.pkg.dev/cockroach-cloud-images/cockroachdb/cockroach # Since CRDB 25.1, 'on' should become the default, which will break # the test suite assumption. @@ -407,10 +407,10 @@ jobs: - name: Install the wanted libpq version run: sudo ./tools/ci/ci_install_libpq.sh ${{ matrix.libpq }} - - name: Include psycopg-c to the packages to install + - name: Include gaussdb-c to the packages to install if: ${{ matrix.impl == 'c' }} run: | - echo "DEPS=$DEPS ./psycopg_c" >> $GITHUB_ENV + echo "DEPS=$DEPS ./gaussdb_c" >> $GITHUB_ENV - name: Install Python packages run: pip install $DEPS diff --git a/.gitignore b/.gitignore index 9e7946ce5..3cf399070 100644 --- a/.gitignore +++ b/.gitignore @@ -6,7 +6,7 @@ __pycache__/ /docs/_build/ *.html -/psycopg_binary/ +/gaussdb_binary/ .vscode .venv .coverage diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4b53e7261..0ee91c159 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ # See https://pre-commit.com for more information # # Note: we use `language: system` to make sure that pre-commit uses the same -# dependencies installed by `pip install psycopg[dev]` and not some random +# dependencies installed by `pip install gaussdb[dev]` and not some random # version installed in a hidden virtualenv. This way running the tools via # pre-commit should give the same result of running them manually. 
# diff --git a/README.rst b/README.rst index 575e1731e..14ec844f9 100644 --- a/README.rst +++ b/README.rst @@ -1,7 +1,7 @@ -Psycopg 3 -- PostgreSQL database adapter for Python +gaussdb -- PostgreSQL database adapter for Python =================================================== -Psycopg 3 is a modern implementation of a PostgreSQL adapter for Python. +gaussdb is a modern implementation of a PostgreSQL adapter for Python. Installation @@ -10,11 +10,11 @@ Installation Quick version:: pip install --upgrade pip # upgrade pip to at least 20.3 - pip install "psycopg[binary,pool]" # install binary dependencies + pip install "gaussdb[binary,pool]" # install binary dependencies For further information about installation please check `the documentation`__. -.. __: https://www.psycopg.org/psycopg3/docs/basic/install.html +.. __: https://www.gaussdb.org/gaussdb/docs/basic/install.html .. _Hacking: @@ -22,7 +22,7 @@ For further information about installation please check `the documentation`__. Hacking ------- -In order to work on the Psycopg source code, you must have the +In order to work on the GaussDB source code, you must have the ``libpq`` PostgreSQL client library installed on the system. For instance, on Debian systems, you can obtain it by running:: @@ -37,28 +37,28 @@ which is included in the Command Line Tools. .. __: https://www.enterprisedb.com/downloads/postgres-postgresql-downloads -You can then clone this repository to develop Psycopg:: +You can then clone this repository to develop GaussDB:: - git clone https://github.com/psycopg/psycopg.git - cd psycopg + git clone https://github.com/gaussdb/gaussdb.git + cd gaussdb Please note that the repository contains the source code of several Python packages, which may have different requirements: -- The ``psycopg`` directory contains the pure python implementation of - ``psycopg``. The package has only a runtime dependency on the ``libpq``, the +- The ``gaussdb`` directory contains the pure python implementation of + ``gaussdb``. The package has only a runtime dependency on the ``libpq``, the PostgreSQL client library, which should be installed in your system. -- The ``psycopg_c`` directory contains an optimization module written in +- The ``gaussdb_c`` directory contains an optimization module written in C/Cython. In order to build it you will need a few development tools: please look at `Local installation`__ in the docs for the details. -- The ``psycopg_pool`` directory contains the `connection pools`__ +- The ``gaussdb_pool`` directory contains the `connection pools`__ implementations. This is kept as a separate package to allow a different release cycle. -.. __: https://www.psycopg.org/psycopg3/docs/basic/install.html#local-installation -.. __: https://www.psycopg.org/psycopg3/docs/advanced/pool.html +.. __: https://www.gaussdb.org/gaussdb/docs/basic/install.html#local-installation +.. __: https://www.gaussdb.org/gaussdb/docs/advanced/pool.html You can create a local virtualenv and install the packages `in development mode`__, together with their development and testing @@ -66,9 +66,9 @@ requirements:: python -m venv .venv source .venv/bin/activate - pip install -e "./psycopg[dev,test]" # for the base Python package - pip install -e ./psycopg_pool # for the connection pool - pip install ./psycopg_c # for the C speedup module + pip install -e "./gaussdb[dev,test]" # for the base Python package + pip install -e ./gaussdb_pool # for the connection pool + pip install ./gaussdb_c # for the C speedup module .. 
__: https://pip.pypa.io/en/stable/topics/local-project-installs/#editable-installs @@ -79,8 +79,8 @@ Please add ``--config-settings editable_mode=strict`` to the ``pip install Now hack away! You can run the tests using:: - psql -c 'create database psycopg_test' - export PSYCOPG_TEST_DSN="dbname=psycopg_test" + psql -c 'create database gaussdb_test' + export GAUSSDB_TEST_DSN="dbname=gaussdb_test" pytest The library includes some pre-commit hooks to check that the code is valid @@ -96,7 +96,7 @@ will save you time and frustrations. Cross-compiling --------------- -To use cross-platform zipapps created with `shiv`__ that include Psycopg +To use cross-platform zipapps created with `shiv`__ that include GaussDB as a dependency you must also have ``libpq`` installed. See `the section above `_ for install instructions. diff --git a/docs/Makefile b/docs/Makefile index e86cbd48d..ac72751cf 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -14,7 +14,7 @@ help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) || true serve: - PSYCOPG_IMPL=python sphinx-autobuild . _build/html/ + GAUSSDB_IMPL=python sphinx-autobuild . _build/html/ .PHONY: help serve env Makefile @@ -22,7 +22,7 @@ env: .venv .venv: $(PYTHON) -m venv .venv - ./.venv/bin/pip install -e "../psycopg[docs]" -e ../psycopg_pool + ./.venv/bin/pip install -e "../gaussdb[docs]" -e ../gaussdb_pool # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). diff --git a/docs/README.rst b/docs/README.rst index 24675995f..8729f3860 100644 --- a/docs/README.rst +++ b/docs/README.rst @@ -1,6 +1,6 @@ :orphan: -Psycopg documentation build +GaussDB documentation build =========================== Quick start:: diff --git a/docs/_static/psycopg.css b/docs/_static/gaussdb.css similarity index 100% rename from docs/_static/psycopg.css rename to docs/_static/gaussdb.css diff --git a/docs/_static/psycopg.svg b/docs/_static/gaussdb.svg similarity index 100% rename from docs/_static/psycopg.svg rename to docs/_static/gaussdb.svg diff --git a/docs/advanced/adapt.rst b/docs/advanced/adapt.rst index 5787298d9..2c9ce1d62 100644 --- a/docs/advanced/adapt.rst +++ b/docs/advanced/adapt.rst @@ -1,11 +1,11 @@ -.. currentmodule:: psycopg.adapt +.. currentmodule:: gaussdb.adapt .. _adaptation: Data adaptation configuration ============================= -The adaptation system is at the core of Psycopg and allows to customise the +The adaptation system is at the core of GaussDB and allows to customise the way Python objects are converted to PostgreSQL when a query is performed and how PostgreSQL values are converted to Python objects when query results are returned. @@ -17,39 +17,39 @@ returned. adaptation rules. - Adaptation configuration is performed by changing the - `~psycopg.abc.AdaptContext.adapters` object of objects implementing the - `~psycopg.abc.AdaptContext` protocol, for instance `~psycopg.Connection` - or `~psycopg.Cursor`. + `~gaussdb.abc.AdaptContext.adapters` object of objects implementing the + `~gaussdb.abc.AdaptContext` protocol, for instance `~gaussdb.Connection` + or `~gaussdb.Cursor`. - Every context object derived from another context inherits its adapters mapping: cursors created from a connection inherit the connection's configuration. 
By default, connections obtain an adapters map from the global map - exposed as `psycopg.adapters`: changing the content of this object will + exposed as `gaussdb.adapters`: changing the content of this object will affect every connection created afterwards. You may specify a different template adapters map using the `!context` parameter on - `~psycopg.Connection.connect()`. + `~gaussdb.Connection.connect()`. .. image:: ../pictures/adapt.svg :align: center - The `!adapters` attributes are `AdaptersMap` instances, and contain the - mapping from Python types and `~psycopg.abc.Dumper` classes, and from - PostgreSQL OIDs to `~psycopg.abc.Loader` classes. Changing this mapping + mapping from Python types and `~gaussdb.abc.Dumper` classes, and from + PostgreSQL OIDs to `~gaussdb.abc.Loader` classes. Changing this mapping (e.g. writing and registering your own adapters, or using a different configuration of builtin adapters) affects how types are converted between Python and PostgreSQL. - - Dumpers (objects implementing the `~psycopg.abc.Dumper` protocol) are + - Dumpers (objects implementing the `~gaussdb.abc.Dumper` protocol) are the objects used to perform the conversion from a Python object to a bytes sequence in a format understood by PostgreSQL. The string returned *shouldn't be quoted*: the value will be passed to the database using functions such as :pq:`PQexecParams()` so quoting and quotes escaping is not necessary. The dumper usually also suggests to the server what type to - use, via its `~psycopg.abc.Dumper.oid` attribute. + use, via its `~gaussdb.abc.Dumper.oid` attribute. - - Loaders (objects implementing the `~psycopg.abc.Loader` protocol) are + - Loaders (objects implementing the `~gaussdb.abc.Loader` protocol) are the objects used to perform the opposite operation: reading a bytes sequence from PostgreSQL and creating a Python object out of it. @@ -68,26 +68,26 @@ returned. Dumpers and loaders life cycle ------------------------------ -Registering dumpers and loaders will instruct Psycopg to use them +Registering dumpers and loaders will instruct GaussDB to use them in the queries to follow, in the context where they have been registered. -When a query is performed on a `~psycopg.Cursor`, a -`~psycopg.adapt.Transformer` object is created as a local context to manage +When a query is performed on a `~gaussdb.Cursor`, a +`~gaussdb.adapt.Transformer` object is created as a local context to manage adaptation during the query, instantiating the required dumpers and loaders and dispatching the values to perform the wanted conversions from Python to Postgres and back. - The `!Transformer` copies the adapters configuration from the `!Cursor`, - thus inheriting all the changes made to the global `psycopg.adapters` + thus inheriting all the changes made to the global `gaussdb.adapters` configuration, the current `!Connection`, the `!Cursor`. - For every Python type passed as query argument, the `!Transformer` will - instantiate a `~psycopg.abc.Dumper`. Usually all the objects of the same + instantiate a `~gaussdb.abc.Dumper`. Usually all the objects of the same type will be converted by the same dumper instance. - - According to the placeholder used (``%s``, ``%b``, ``%t``), Psycopg may + - According to the placeholder used (``%s``, ``%b``, ``%t``), GaussDB may select a binary or a text dumper class (identified by their - `~psycopg.abc.Dumper.format` attribute). When using the ``%s`` + `~gaussdb.abc.Dumper.format` attribute). 
When using the ``%s`` "`~PyFormat.AUTO`" format, if the same type has both a text and a binary dumper registered, the last one registered by `~AdaptersMap.register_dumper()` will be used. @@ -96,19 +96,19 @@ Postgres and back. best PostgreSQL type to use (for instance the PostgreSQL type of a Python list depends on the objects it contains, whether to use an :sql:`integer` or :sql:`bigint` depends on the number size...) In these cases the - mechanism provided by `~psycopg.abc.Dumper.get_key()` and - `~psycopg.abc.Dumper.upgrade()` is used to create more specific dumpers. + mechanism provided by `~gaussdb.abc.Dumper.get_key()` and + `~gaussdb.abc.Dumper.upgrade()` is used to create more specific dumpers. - The query is executed. Upon successful request, the result is received as a - `~psycopg.pq.PGresult`. + `~gaussdb.pq.PGresult`. - For every OID returned by the query, the `!Transformer` will instantiate a - `~psycopg.abc.Loader`. All the values with the same OID will be converted by + `~gaussdb.abc.Loader`. All the values with the same OID will be converted by the same loader instance. - According to the format of the result, which can be text or binary, - Psycopg will select either text loaders or binary loaders (identified by - their `~psycopg.abc.Loader.format` attribute). + GaussDB will select either text loaders or binary loaders (identified by + their `~gaussdb.abc.Loader.format` attribute). - Recursive types (e.g. Python lists, PostgreSQL arrays and composite types) will use the same adaptation rules. @@ -118,7 +118,7 @@ As a consequence it is possible to perform certain choices only once per query for each value to convert. Querying will fail if a Python object for which there isn't a `!Dumper` -registered (for the right `~psycopg.pq.Format`) is used as query parameter. +registered (for the right `~gaussdb.pq.Format`) is used as query parameter. If the query returns a data type whose OID doesn't have a `!Loader`, the value will be returned as a string (or bytes string for binary types). @@ -128,18 +128,18 @@ value will be returned as a string (or bytes string for binary types). Writing a custom adapter: XML ----------------------------- -Psycopg doesn't provide adapters for the XML data type, because there are just +GaussDB doesn't provide adapters for the XML data type, because there are just too many ways of handling XML in Python. Creating a loader to parse the `PostgreSQL xml type`__ to `~xml.etree.ElementTree` is very simple, using the -`psycopg.adapt.Loader` base class and implementing the -`~psycopg.abc.Loader.load()` method: +`gaussdb.adapt.Loader` base class and implementing the +`~gaussdb.abc.Loader.load()` method: .. __: https://www.postgresql.org/docs/current/datatype-xml.html .. code:: python >>> import xml.etree.ElementTree as ET - >>> from psycopg.adapt import Loader + >>> from gaussdb.adapt import Loader >>> # Create a class implementing the `load()` method. >>> class XmlLoader(Loader): @@ -160,14 +160,14 @@ too many ways of handling XML in Python. Creating a loader to parse the The opposite operation, converting Python objects to PostgreSQL, is performed -by dumpers. The `psycopg.adapt.Dumper` base class makes it easy to implement one: -you only need to implement the `~psycopg.abc.Dumper.dump()` method:: +by dumpers. The `gaussdb.adapt.Dumper` base class makes it easy to implement one: +you only need to implement the `~gaussdb.abc.Dumper.dump()` method:: - >>> from psycopg.adapt import Dumper + >>> from gaussdb.adapt import Dumper >>> class XmlDumper(Dumper): ... 
# Setting an OID is not necessary but can be helpful - ... oid = psycopg.adapters.types["xml"].oid + ... oid = gaussdb.adapters.types["xml"].oid ... ... def dump(self, elem): ... return ET.tostring(elem) @@ -181,12 +181,12 @@ you only need to implement the `~psycopg.abc.Dumper.dump()` method:: .. note:: - You can use a `~psycopg.types.TypesRegistry`, exposed by - any `~psycopg.abc.AdaptContext`, to obtain information on builtin types, in + You can use a `~gaussdb.types.TypesRegistry`, exposed by + any `~gaussdb.abc.AdaptContext`, to obtain information on builtin types, in the form of a `TypeInfo` object:: # Global types registry - >>> psycopg.adapters.types["text"] + >>> gaussdb.adapters.types["text"] # Types registry on a connection @@ -195,14 +195,14 @@ you only need to implement the `~psycopg.abc.Dumper.dump()` method:: The same method can be used to get information about extension types if they have been registered on that context using the - `~psycopg.types.TypeInfo`\.\ `~psycopg.types.TypeInfo.register()` method:: + `~gaussdb.types.TypeInfo`\.\ `~gaussdb.types.TypeInfo.register()` method:: - >>> (t := psycopg.types.TypeInfo.fetch(conn, "hstore")) + >>> (t := gaussdb.types.TypeInfo.fetch(conn, "hstore")) >>> t.register() # globally - >>> psycopg.adapters.types["hstore"] + >>> gaussdb.adapters.types["hstore"] @@ -220,7 +220,7 @@ If you prefer to store missing values as :sql:`NULL`, in the database, but your input may contain empty strings, you can subclass the stock string dumper to return `!None` upon empty or whitespace-only strings:: - >>> from psycopg.types.string import StrDumper + >>> from gaussdb.types.string import StrDumper >>> class NullStrDumper(StrDumper): ... def dump(self, obj): @@ -257,12 +257,12 @@ compatible. .. code:: python - conn = psycopg.connect() + conn = gaussdb.connect() conn.execute("SELECT 123.45").fetchone()[0] # Decimal('123.45') - conn.adapters.register_loader("numeric", psycopg.types.numeric.FloatLoader) + conn.adapters.register_loader("numeric", gaussdb.types.numeric.FloatLoader) conn.execute("SELECT 123.45").fetchone()[0] # 123.45 @@ -296,7 +296,7 @@ cursor): from datetime import date # Subclass existing adapters so that the base case is handled normally. - from psycopg.types.datetime import DateLoader, DateDumper + from gaussdb.types.datetime import DateLoader, DateDumper class InfDateDumper(DateDumper): def dump(self, obj): diff --git a/docs/advanced/async.rst b/docs/advanced/async.rst index 6aa5990c7..3b3224e0d 100644 --- a/docs/advanced/async.rst +++ b/docs/advanced/async.rst @@ -1,4 +1,4 @@ -.. currentmodule:: psycopg +.. currentmodule:: gaussdb .. index:: threads @@ -8,7 +8,7 @@ Concurrent operations ===================== -Psycopg allows to write *concurrent* code, executing more than one operation +GaussDB allows to write *concurrent* code, executing more than one operation at time. - `Connection` objects *are thread-safe*: more than one thread at time can use @@ -62,7 +62,7 @@ at time. *Connections are not process-safe* and cannot be shared across processes, for instance using the facilities of the `multiprocessing` module. - If you are using Psycopg in a forking framework (for instance in a web + If you are using GaussDB in a forking framework (for instance in a web server that implements concurrency using multiprocessing), you should make sure that the database connections are created after the worker process is forked. Failing to do so you will probably find the connection in broken @@ -76,7 +76,7 @@ at time. 
Asynchronous operations ----------------------- -Psycopg `Connection` and `Cursor` have counterparts `AsyncConnection` and +GaussDB `Connection` and `Cursor` have counterparts `AsyncConnection` and `AsyncCursor` supporting an `asyncio` interface. The design of the asynchronous objects is pretty much the same of the sync @@ -85,7 +85,7 @@ here and there. .. code:: python - async with await psycopg.AsyncConnection.connect( + async with await gaussdb.AsyncConnection.connect( "dbname=test user=postgres") as aconn: async with aconn.cursor() as acur: await acur.execute( @@ -112,7 +112,7 @@ serialized. Before version 3.1, `AsyncConnection.connect()` may still block on DNS name resolution. To avoid that you should `set the hostaddr connection - parameter`__, or use the `~psycopg._dns.resolve_hostaddr_async()` to + parameter`__, or use the `~gaussdb._dns.resolve_hostaddr_async()` to do it automatically. .. __: https://www.postgresql.org/docs/current/libpq-connect.html @@ -120,7 +120,7 @@ serialized. .. warning:: - On Windows, Psycopg is not compatible with the default + On Windows, GaussDB is not compatible with the default `~asyncio.ProactorEventLoop`. Please use a different loop, for instance the `~asyncio.SelectorEventLoop`. @@ -146,7 +146,7 @@ context managers, so you can run: .. code:: python - with psycopg.connect("dbname=test user=postgres") as conn: + with gaussdb.connect("dbname=test user=postgres") as conn: with conn.cursor() as cur: cur.execute(...) # the cursor is closed upon leaving the context @@ -164,7 +164,7 @@ two steps instead, as in .. code:: python - aconn = await psycopg.AsyncConnection.connect() + aconn = await gaussdb.AsyncConnection.connect() async with aconn: async with aconn.cursor() as cur: await cur.execute(...) @@ -173,7 +173,7 @@ which can be condensed into `!async with await`: .. code:: python - async with await psycopg.AsyncConnection.connect() as aconn: + async with await gaussdb.AsyncConnection.connect() as aconn: async with aconn.cursor() as cur: await cur.execute(...) @@ -199,7 +199,7 @@ will be put in error state, from which can be recovered with a normal An async connection provides similar behavior in that if the async task is cancelled, any operation on the connection will similarly be cancelled. This can happen either indirectly via Ctrl-C or similar signal, or directly by -cancelling the Python Task via the normal way. Psycopg will ask the +cancelling the Python Task via the normal way. GaussDB will ask the PostgreSQL postmaster to cancel the operation when it encounters the standard Python `CancelledError`__. @@ -209,7 +209,7 @@ CancelledError. If you need to know the ultimate outcome of the statement, then consider calling `Connection.cancel()` as an alternative to cancelling the task. -Previous versions of Psycopg recommended setting up signal handlers to +Previous versions of GaussDB recommended setting up signal handlers to manually cancel connections. This should no longer be necessary. @@ -223,12 +223,12 @@ manually cancel connections. This should no longer be necessary. Gevent support -------------- -Psycopg 3 supports `gevent `__ out of the box. If the +gaussdb supports `gevent `__ out of the box. If the `select` module is found patched by functions such as -`gevent.monkey.patch_select()`__ or `patch_all()`__, psycopg will behave in a +`gevent.monkey.patch_select()`__ or `patch_all()`__, gaussdb will behave in a collaborative way. -Unlike with `!psycopg2`, using the `!psycogreen` module is not required. 
+Unlike with `!_GaussDB`, using the `!psycogreen` module is not required. .. __: http://www.gevent.org/api/gevent.monkey.html#gevent.monkey.patch_select .. __: http://www.gevent.org/api/gevent.monkey.html#gevent.monkey.patch_all @@ -236,9 +236,9 @@ Unlike with `!psycopg2`, using the `!psycogreen` module is not required. .. warning:: gevent support was initially accidental, and was accidentally broken in - psycopg 3.1.4. + gaussdb 3.1.4. - gevent is officially supported only starting from psycopg 3.1.14. + gevent is officially supported only starting from gaussdb 3.1.14. .. index:: @@ -280,17 +280,17 @@ the client you can use the `Connection.add_notice_handler()` function to register a function that will be invoked whenever a message is received. The message is passed to the callback as a `~errors.Diagnostic` instance, containing all the information passed by the server, such as the message text -and the severity. The object is the same found on the `~psycopg.Error.diag` +and the severity. The object is the same found on the `~gaussdb.Error.diag` attribute of the errors raised by the server: .. code:: python - >>> import psycopg + >>> import gaussdb >>> def log_notice(diag): ... print(f"The server says: {diag.severity} - {diag.message_primary}") - >>> conn = psycopg.connect(autocommit=True) + >>> conn = gaussdb.connect(autocommit=True) >>> conn.add_notice_handler(log_notice) >>> cur = conn.execute("ROLLBACK") @@ -317,7 +317,7 @@ attribute of the errors raised by the server: Asynchronous notifications -------------------------- -Psycopg allows asynchronous interaction with other database sessions using the +GaussDB allows asynchronous interaction with other database sessions using the facilities offered by PostgreSQL commands |LISTEN|_ and |NOTIFY|_. Please refer to the PostgreSQL documentation for examples about how to use this form of communication. @@ -334,7 +334,7 @@ mode if you wish to receive or send notifications in a timely manner. Notifications are received as instances of `Notify`. If you are reserving a connection only to receive notifications, the simplest way is to consume the `Connection.notifies` generator. The generator can be stopped using -`!close()`. Starting from Psycopg 3.2, the method supports options to receive +`!close()`. Starting from gaussdb 3.2, the method supports options to receive notifications only for a certain time or up to a certain number. .. note:: @@ -347,8 +347,8 @@ the ``"stop"`` message is received. .. code:: python - import psycopg - conn = psycopg.connect("", autocommit=True) + import gaussdb + conn = gaussdb.connect("", autocommit=True) conn.execute("LISTEN mychan") gen = conn.notifies() for notify in gen: @@ -433,7 +433,7 @@ something else to do too. # Activity detected. Is the connection still ok? try: conn.execute("SELECT 1") - except psycopg.OperationalError: + except gaussdb.OperationalError: # You were disconnected: do something useful such as panicking logger.error("we lost our database!") sys.exit(1) @@ -458,6 +458,6 @@ something similar using `~asyncio.loop.add_reader`: # Activity detected. Is the connection still ok? try: await conn.execute("SELECT 1") - except psycopg.OperationalError: + except gaussdb.OperationalError: # Guess what happened ... diff --git a/docs/advanced/cursors.rst b/docs/advanced/cursors.rst index 4bd522746..c853eb3fc 100644 --- a/docs/advanced/cursors.rst +++ b/docs/advanced/cursors.rst @@ -1,4 +1,4 @@ -.. currentmodule:: psycopg +.. currentmodule:: gaussdb .. 
index:: single: Cursor .. _cursor-types: Cursor types ============ Cursors are objects used to send commands to a PostgreSQL connection and to manage the results returned by it. They are normally created by the connection's `~Connection.cursor()` method. -Psycopg can manage different kinds of "cursors", the objects used to send +GaussDB can manage different kinds of "cursors", the objects used to send queries and retrieve results from the server. They differ from each other in aspects such as: @@ -20,7 +20,7 @@ aspects such as: :ref:`server-side-binding` can offer better performance (for instance allowing to use prepared statements) and reduced memory footprint, but may require stricter query definition and certain queries that work in - `!psycopg2` might need to be adapted. + `!_GaussDB` might need to be adapted. - Is the query result stored on the client or on the server? Server-side cursors allow partial retrieval of large datasets, but they might offer less @@ -30,8 +30,8 @@ aspects such as: ``%(name)s`` Python-style) or sent as they are to the PostgreSQL server (which only supports ``$1``, ``$2`` parameters)? -Psycopg exposes the following classes to implement the different strategies. -All the classes are exposed by the main `!psycopg` package. Every class has +GaussDB exposes the following classes to implement the different strategies. +All the classes are exposed by the main `!gaussdb` package. Every class has also an `!Async`\ -prefixed counterparts, designed to be used in conjunction with `AsyncConnection` in `asyncio` programs. @@ -57,7 +57,7 @@ will usually produce `Cursor` objects. Client-side cursors ------------------- -Client-side cursors are what Psycopg uses in its normal querying process. +Client-side cursors are what GaussDB uses in its normal querying process. They are implemented by the `Cursor` and `AsyncCursor` classes. In such querying pattern, after a cursor sends a query to the server (usually calling `~Cursor.execute()`), the server replies transferring to the client the whole @@ -98,9 +98,9 @@ the server. This allows to parametrize any type of PostgreSQL statement, not only queries (:sql:`SELECT`) and Data Manipulation statements (:sql:`INSERT`, :sql:`UPDATE`, :sql:`DELETE`). -Using `!ClientCursor`, Psycopg 3 behaviour will be more similar to `psycopg2` +Using `!ClientCursor`, gaussdb behaviour will be more similar to `_GaussDB` (which only implements client-side binding) and could be useful to port -Psycopg 2 programs more easily to Psycopg 3. The objects in the `sql` module +`!_GaussDB` programs more easily to gaussdb. The objects in the `sql` module allow for greater flexibility (for instance to parametrize a table name too, not only values); however, for simple cases, a `!ClientCursor` could be the right object. @@ -111,11 +111,11 @@ afterwards): .. code:: python - from psycopg import connect, ClientCursor + from gaussdb import connect, ClientCursor - conn = psycopg.connect(DSN, cursor_factory=ClientCursor) + conn = gaussdb.connect(DSN, cursor_factory=ClientCursor) cur = conn.cursor() - # + # If you need to create a one-off client-side-binding cursor out of a normal connection, you can just use the `~ClientCursor` class passing the connection as argument. .. code:: python - conn = psycopg.connect(DSN) - cur = psycopg.ClientCursor(conn) + conn = gaussdb.connect(DSN) + cur = gaussdb.ClientCursor(conn) .. warning:: @@ -136,10 +136,10 @@ as argument. .. 
tip:: The best use for client-side binding cursors is probably to port large - Psycopg 2 code to Psycopg 3, especially for programs making wide use of + `!_GaussDB` code to gaussdb, especially for programs making wide use of Data Definition Language statements. - The `psycopg.sql` module allows for more generic client-side query + The `gaussdb.sql` module allows for more generic client-side query composition, to mix client- and server-side parameters binding, and allows to parametrize tables and fields names too, or entirely generic SQL snippets. @@ -154,7 +154,7 @@ as argument. Simple query protocol ^^^^^^^^^^^^^^^^^^^^^ -Using the `!ClientCursor` should ensure that psycopg will always use the +Using the `!ClientCursor` should ensure that gaussdb will always use the `simple query protocol`__ for querying. In most cases, the choice of the fronted/backend protocol used is transparent on PostgreSQL. However, in some case using the simple query protocol is mandatory. This is the case querying @@ -166,9 +166,9 @@ extended query protocol. .. code:: python - from psycopg import connect, ClientCursor + from gaussdb import connect, ClientCursor - conn = psycopg.connect(ADMIN_DSN, cursor_factory=ClientCursor) + conn = gaussdb.connect(ADMIN_DSN, cursor_factory=ClientCursor) cur = conn.cursor() cur.execute("SHOW STATS") cur.fetchall() @@ -176,16 +176,16 @@ extended query protocol. .. versionchanged:: 3.1.20 While querying using the `!ClientCursor` works well with PgBouncer, the connection's COMMIT and ROLLBACK commands are only ensured to be executed - using the simple query protocol starting from Psycopg 3.1.20. + using the simple query protocol starting from gaussdb 3.1.20. In previous versions you should use an autocommit connection in order to query the PgBouncer admin console: .. code:: python - from psycopg import connect, ClientCursor + from gaussdb import connect, ClientCursor - conn = psycopg.connect(ADMIN_DSN, cursor_factory=ClientCursor, autocommit=True) + conn = gaussdb.connect(ADMIN_DSN, cursor_factory=ClientCursor, autocommit=True) ... @@ -209,10 +209,10 @@ is possible to transmit only them. The downside is that the server needs to keep track of the partially processed results, so it uses more memory and resources on the server. -Psycopg allows the use of server-side cursors using the classes `ServerCursor` +GaussDB allows the use of server-side cursors using the classes `ServerCursor` and `AsyncServerCursor`. They are usually created by passing the `!name` parameter to the `~Connection.cursor()` method (reason for which, in -`!psycopg2`, they are usually called *named cursors*). The use of these classes +`!_GaussDB`, they are usually called *named cursors*). The use of these classes is similar to their client-side counterparts: their interface is the same, but behind the scene they send commands to control the state of the cursor on the server (for instance when fetching new records or when moving using @@ -240,7 +240,7 @@ result is needed. "Stealing" an existing cursor ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A Psycopg `ServerCursor` can be also used to consume a cursor which was +A GaussDB `ServerCursor` can be also used to consume a cursor which was created in other ways than the :sql:`DECLARE` that `ServerCursor.execute()` runs behind the scene. @@ -303,7 +303,7 @@ There are two ways to use raw query cursors: .. 
code:: python - from psycopg import connect, RawCursor + from gaussdb import connect, RawCursor with connect(dsn, cursor_factory=RawCursor) as conn: with conn.cursor() as cur: @@ -314,7 +314,7 @@ There are two ways to use raw query cursors: .. code:: python - from psycopg import connect, RawCursor + from gaussdb import connect, RawCursor with connect(dsn) as conn: with RawCursor(conn) as cur: diff --git a/docs/advanced/index.rst b/docs/advanced/index.rst index 6920bd74f..ce2c05214 100644 --- a/docs/advanced/index.rst +++ b/docs/advanced/index.rst @@ -3,7 +3,7 @@ More advanced topics ==================== -Once you have familiarised yourself with the :ref:`Psycopg basic operations +Once you have familiarised yourself with the :ref:`GaussDB basic operations `, you can take a look at the chapter of this section for more advanced usages. diff --git a/docs/advanced/pipeline.rst b/docs/advanced/pipeline.rst index cf71e2509..4cdfaa708 100644 --- a/docs/advanced/pipeline.rst +++ b/docs/advanced/pipeline.rst @@ -1,4 +1,4 @@ -.. currentmodule:: psycopg +.. currentmodule:: gaussdb .. _pipeline-mode: @@ -165,7 +165,7 @@ they will result in a single roundtrip between the client and the server: Pipeline mode usage ------------------- -Psycopg supports the pipeline mode via the `Connection.pipeline()` method. The +GaussDB supports the pipeline mode via the `Connection.pipeline()` method. The method is a context manager: entering the ``with`` block yields a `Pipeline` object. At the end of block, the connection resumes the normal operation mode. @@ -183,7 +183,7 @@ several operations, using `Connection.execute()`, `Cursor.execute()` and ... "INSERT INTO elsewhere VALUES (%s)", ... [("one",), ("two",), ("four",)]) -Unlike in normal mode, Psycopg will not wait for the server to receive the +Unlike in normal mode, GaussDB will not wait for the server to receive the result of each query; the client will receive results in batches when the server flushes it output buffer. You can receive more than a single result by using more than one cursor in the same pipeline. @@ -205,7 +205,7 @@ synchronization point. .. note:: - Starting from Psycopg 3.1, `~Cursor.executemany()` makes use internally of + Starting from gaussdb 3.1, `~Cursor.executemany()` makes use internally of the pipeline mode; as a consequence there is no need to handle a pipeline block just to call `!executemany()` once. @@ -216,7 +216,7 @@ Synchronization points ---------------------- Flushing query results to the client can happen either when a synchronization -point is established by Psycopg: +point is established by GaussDB: - using the `Pipeline.sync()` method; - on `Connection.commit()` or `~Connection.rollback()`; @@ -238,14 +238,14 @@ For example, in the following block: .. code:: python - >>> with psycopg.connect(autocommit=True) as conn: + >>> with gaussdb.connect(autocommit=True) as conn: ... with conn.pipeline() as p, conn.cursor() as cur: ... try: ... cur.execute("INSERT INTO mytable (data) VALUES (%s)", ["one"]) ... cur.execute("INSERT INTO no_such_table (data) VALUES (%s)", ["two"]) ... conn.execute("INSERT INTO mytable (data) VALUES (%s)", ["three"]) ... p.sync() - ... except psycopg.errors.UndefinedTable: + ... except gaussdb.errors.UndefinedTable: ... pass ... cur.execute("INSERT INTO mytable (data) VALUES (%s)", ["four"]) diff --git a/docs/advanced/pool.rst b/docs/advanced/pool.rst index 6b41a7c05..ec882989e 100644 --- a/docs/advanced/pool.rst +++ b/docs/advanced/pool.rst @@ -1,4 +1,4 @@ -.. currentmodule:: psycopg_pool +.. 
currentmodule:: gaussdb_pool .. _connection-pools: @@ -11,13 +11,13 @@ connection can be relatively long, keeping connections open can reduce latency. .. __: https://en.wikipedia.org/wiki/Connection_pool -This page explains a few basic concepts of Psycopg connection pool's +This page explains a few basic concepts of GaussDB connection pool's behaviour. Please refer to the `ConnectionPool` object API for details about the pool operations. .. note:: The connection pool objects are distributed in a package separate - from the main `psycopg` package: use ``pip install "psycopg[pool]"`` or ``pip - install psycopg_pool`` to make the `psycopg_pool` package available. See + from the main `gaussdb` package: use ``pip install "gaussdb[pool]"`` or ``pip + install gaussdb_pool`` to make the `gaussdb_pool` package available. See :ref:`pool-installation`. @@ -41,7 +41,7 @@ Within the `!with` block, you can request the pool a connection using the # At the end of the pool context, all the resources used by the pool are released -The `!connection()` context behaves like the `~psycopg.Connection` object +The `!connection()` context behaves like the `~gaussdb.Connection` object context: at the end of the block, if there is a transaction open, it will be committed if the context is exited normally, or rolled back if the context is exited with an exception. See :ref:`transaction-context` for details. @@ -143,7 +143,7 @@ process starts immediately. In a simple program you might create a pool as a global object and use it from the rest of your code:: # module db.py in your program - from psycopg_pool import ConnectionPool + from gaussdb_pool import ConnectionPool pool = ConnectionPool(..., open=True, ...) # the pool starts connecting immediately. @@ -209,12 +209,12 @@ use it if you deploy the application in several instances, behind a load balancer, and/or using an external connection pool process such as PgBouncer. Switching between using or not using a pool requires some code change, because -the `ConnectionPool` API is different from the normal `~psycopg.connect()` +the `ConnectionPool` API is different from the normal `~gaussdb.connect()` function and because the pool can perform additional connection configuration (in the `!configure` parameter) that, if the pool is removed, should be performed in some different code path of your application. -The `!psycopg_pool` 3.1 package introduces the `NullConnectionPool` class. +The `!gaussdb_pool` 3.1 package introduces the `NullConnectionPool` class. This class has the same interface, and largely the same behaviour, of the `!ConnectionPool`, but doesn't create any connection beforehand. When a connection is returned, unless there are other clients already waiting, it @@ -322,7 +322,7 @@ Pool operations logging ----------------------- The pool uses the `logging` module to log some key operations to the -``psycopg.pool`` logger. If you are trying to debug the pool behaviour you may +``gaussdb.pool`` logger. If you are trying to debug the pool behaviour you may try to log at least the ``INFO`` operations on that logger. 
For example, the script: @@ -332,12 +332,12 @@ For example, the script: import time import logging from concurrent.futures import ThreadPoolExecutor, as_completed - from psycopg_pool import ConnectionPool + from gaussdb_pool import ConnectionPool logging.basicConfig( level=logging.INFO, format="%(asctime)s %(levelname)s %(name)s: %(message)s" ) - logging.getLogger("psycopg.pool").setLevel(logging.INFO) + logging.getLogger("gaussdb.pool").setLevel(logging.INFO) pool = ConnectionPool(min_size=2) pool.wait() @@ -358,27 +358,27 @@ might print something like: .. code:: text - 2023-09-20 11:02:39,718 INFO psycopg.pool: waiting for pool 'pool-1' initialization - 2023-09-20 11:02:39,720 INFO psycopg.pool: adding new connection to the pool - 2023-09-20 11:02:39,720 INFO psycopg.pool: adding new connection to the pool - 2023-09-20 11:02:39,720 INFO psycopg.pool: pool 'pool-1' is ready to use + 2023-09-20 11:02:39,718 INFO gaussdb.pool: waiting for pool 'pool-1' initialization + 2023-09-20 11:02:39,720 INFO gaussdb.pool: adding new connection to the pool + 2023-09-20 11:02:39,720 INFO gaussdb.pool: adding new connection to the pool + 2023-09-20 11:02:39,720 INFO gaussdb.pool: pool 'pool-1' is ready to use 2023-09-20 11:02:39,720 INFO root: pool ready - 2023-09-20 11:02:39,721 INFO psycopg.pool: connection requested from 'pool-1' - 2023-09-20 11:02:39,721 INFO psycopg.pool: connection given by 'pool-1' - 2023-09-20 11:02:39,721 INFO psycopg.pool: connection requested from 'pool-1' - 2023-09-20 11:02:39,721 INFO psycopg.pool: connection given by 'pool-1' - 2023-09-20 11:02:39,721 INFO psycopg.pool: connection requested from 'pool-1' - 2023-09-20 11:02:39,722 INFO psycopg.pool: connection requested from 'pool-1' + 2023-09-20 11:02:39,721 INFO gaussdb.pool: connection requested from 'pool-1' + 2023-09-20 11:02:39,721 INFO gaussdb.pool: connection given by 'pool-1' + 2023-09-20 11:02:39,721 INFO gaussdb.pool: connection requested from 'pool-1' + 2023-09-20 11:02:39,721 INFO gaussdb.pool: connection given by 'pool-1' + 2023-09-20 11:02:39,721 INFO gaussdb.pool: connection requested from 'pool-1' + 2023-09-20 11:02:39,722 INFO gaussdb.pool: connection requested from 'pool-1' 2023-09-20 11:02:40,724 INFO root: The square of 0 is 0. 2023-09-20 11:02:40,724 INFO root: The square of 1 is 1. - 2023-09-20 11:02:40,725 INFO psycopg.pool: returning connection to 'pool-1' - 2023-09-20 11:02:40,725 INFO psycopg.pool: connection given by 'pool-1' - 2023-09-20 11:02:40,725 INFO psycopg.pool: returning connection to 'pool-1' - 2023-09-20 11:02:40,726 INFO psycopg.pool: connection given by 'pool-1' + 2023-09-20 11:02:40,725 INFO gaussdb.pool: returning connection to 'pool-1' + 2023-09-20 11:02:40,725 INFO gaussdb.pool: connection given by 'pool-1' + 2023-09-20 11:02:40,725 INFO gaussdb.pool: returning connection to 'pool-1' + 2023-09-20 11:02:40,726 INFO gaussdb.pool: connection given by 'pool-1' 2023-09-20 11:02:41,728 INFO root: The square of 3 is 9. 2023-09-20 11:02:41,729 INFO root: The square of 2 is 4. - 2023-09-20 11:02:41,729 INFO psycopg.pool: returning connection to 'pool-1' - 2023-09-20 11:02:41,730 INFO psycopg.pool: returning connection to 'pool-1' + 2023-09-20 11:02:41,729 INFO gaussdb.pool: returning connection to 'pool-1' + 2023-09-20 11:02:41,730 INFO gaussdb.pool: returning connection to 'pool-1' Please do not rely on the messages generated to remain unchanged across versions: they don't constitute a stable interface. 
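A minimal end-to-end usage sketch of the pool API documented above, assuming the renamed `gaussdb_pool` package keeps the `psycopg_pool` 3 behaviour these hunks describe (the DSN and table name are illustrative):

.. code:: python

    from gaussdb_pool import ConnectionPool

    # open=True starts connecting immediately; wait() blocks until min_size
    # connections are established, so startup failures surface early.
    pool = ConnectionPool("dbname=test", min_size=2, max_size=4, open=True)
    pool.wait()

    with pool.connection(timeout=10) as conn:
        # At the end of the block the connection returns to the pool: the
        # open transaction is committed, or rolled back on exception.
        conn.execute("INSERT INTO mytable (data) VALUES (%s)", ["one"])

    pool.close()  # release all the resources used by the pool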
diff --git a/docs/advanced/prepare.rst b/docs/advanced/prepare.rst index 98582a4d0..f99d92d48 100644 --- a/docs/advanced/prepare.rst +++ b/docs/advanced/prepare.rst @@ -1,4 +1,4 @@ -.. currentmodule:: psycopg +.. currentmodule:: gaussdb .. index:: single: Prepared statements @@ -8,13 +8,13 @@ Prepared statements =================== -Psycopg uses an automatic system to manage *prepared statements*. When a +GaussDB uses an automatic system to manage *prepared statements*. When a query is prepared, its parsing and planning is stored in the server session, so that further executions of the same query on the same connection (even with different parameters) are optimised. A query is prepared automatically after it is executed more than -`~Connection.prepare_threshold` times on a connection. `!psycopg` will make +`~Connection.prepare_threshold` times on a connection. `!gaussdb` will make sure that no more than `~Connection.prepared_max` statements are planned: if further queries are executed, the least recently used ones are deallocated and the associated resources freed. @@ -42,7 +42,7 @@ Statement preparation can be controlled in several ways: The `PREPARE`__ PostgreSQL documentation contains plenty of details about prepared statements in PostgreSQL. - Note however that Psycopg doesn't use SQL statements such as + Note however that GaussDB doesn't use SQL statements such as :sql:`PREPARE` and :sql:`EXECUTE`, but protocol level commands such as the ones exposed by :pq:`PQsendPrepare`, :pq:`PQsendQueryPrepared`. @@ -62,7 +62,7 @@ Using prepared statements with PgBouncer is used you should disable prepared statements, by setting the `Connection.prepare_threshold` attribute to `!None`. -Starting from 3.2, Psycopg supports prepared statements when using the +Starting from 3.2, GaussDB supports prepared statements when using the PgBouncer__ middleware, using the following caveats: - PgBouncer version must be version `1.22`__ or newer. @@ -78,6 +78,6 @@ PgBouncer__ middleware, using the following caveats: .. hint:: If libpq 17 is not available on your client, but PgBouncer is 1.22 or - higher, you can still use Psycopg *as long as you disable deallocation*. + higher, you can still use GaussDB *as long as you disable deallocation*. You can do so by setting `Connection.prepared_max` to `!None`. diff --git a/docs/advanced/rows.rst b/docs/advanced/rows.rst index 3b8a4f1eb..d8edae38f 100644 --- a/docs/advanced/rows.rst +++ b/docs/advanced/rows.rst @@ -1,4 +1,4 @@ -.. currentmodule:: psycopg +.. currentmodule:: gaussdb .. index:: row factories @@ -11,13 +11,13 @@ Cursor's `fetch*` methods, by default, return the records received from the database as tuples. This can be changed to better suit the needs of the programmer by using custom *row factories*. -The module `psycopg.rows` exposes several row factories ready to be used. For +The module `gaussdb.rows` exposes several row factories ready to be used. 
For instance, if you want to return your records as dictionaries, you can use -`~psycopg.rows.dict_row`:: +`~gaussdb.rows.dict_row`:: - >>> from psycopg.rows import dict_row + >>> from gaussdb.rows import dict_row - >>> conn = psycopg.connect(DSN, row_factory=dict_row) + >>> conn = gaussdb.connect(DSN, row_factory=dict_row) >>> conn.execute("select 'John Doe' as name, 33 as age").fetchone() {'name': 'John Doe', 'age': 33} @@ -32,14 +32,14 @@ they return:: >>> cur.execute("select 'John Doe' as name, 33 as age").fetchone() {'name': 'John Doe', 'age': 33} - >>> from psycopg.rows import namedtuple_row + >>> from gaussdb.rows import namedtuple_row >>> cur.row_factory = namedtuple_row >>> cur.execute("select 'John Doe' as name, 33 as age").fetchone() Row(name='John Doe', age=33) If you want to return objects of your choice you can use a row factory -*generator*, for instance `~psycopg.rows.class_row` or -`~psycopg.rows.args_row`, or you can :ref:`write your own row factory +*generator*, for instance `~gaussdb.rows.class_row` or +`~gaussdb.rows.args_row`, or you can :ref:`write your own row factory `:: >>> from dataclasses import dataclass @@ -50,7 +50,7 @@ If you want to return objects of your choice you can use a row factory ... age: int ... weight: Optional[int] = None - >>> from psycopg.rows import class_row + >>> from gaussdb.rows import class_row >>> cur = conn.cursor(row_factory=class_row(Person)) >>> cur.execute("select 'John Doe' as name, 33 as age").fetchone() Person(name='John Doe', age=33, weight=None) @@ -86,15 +86,15 @@ query is executed and properties such as `~Cursor.description` and which is efficient to call repeatedly (because, for instance, the names of the columns are extracted, sanitised, and stored in local variables). -Formally, these objects are represented by the `~psycopg.rows.RowFactory` and -`~psycopg.rows.RowMaker` protocols. +Formally, these objects are represented by the `~gaussdb.rows.RowFactory` and +`~gaussdb.rows.RowMaker` protocols. `~RowFactory` objects can be implemented as a class, for instance: .. code:: python from typing import Any, Sequence - from psycopg import Cursor + from gaussdb import Cursor class DictRowFactory: def __init__(self, cursor: Cursor[Any]): @@ -121,7 +121,7 @@ These can then be used by specifying a `row_factory` argument in .. code:: python - conn = psycopg.connect(row_factory=DictRowFactory) + conn = gaussdb.connect(row_factory=DictRowFactory) cur = conn.execute("SELECT first_name, last_name, age FROM persons") person = cur.fetchone() print(f"{person['first_name']} {person['last_name']}") diff --git a/docs/advanced/typing.rst b/docs/advanced/typing.rst index 98efb4067..17eddafb1 100644 --- a/docs/advanced/typing.rst +++ b/docs/advanced/typing.rst @@ -1,15 +1,15 @@ -.. currentmodule:: psycopg +.. currentmodule:: gaussdb .. _static-typing: Static Typing ============= -Psycopg source code is annotated according to :pep:`0484` type hints and is +GaussDB source code is annotated according to :pep:`0484` type hints and is checked using the current version of Mypy_ in ``--strict`` mode. -If your application is checked using Mypy too you can make use of Psycopg -types to validate the correct use of Psycopg objects and of the data returned +If your application is checked using Mypy too you can make use of GaussDB +types to validate the correct use of GaussDB objects and of the data returned by the database. .. _Mypy: http://mypy-lang.org/ @@ -18,23 +18,23 @@ by the database. 
Generic types ------------- -Psycopg `Connection` and `Cursor` objects are `~typing.Generic` objects and +GaussDB `Connection` and `Cursor` objects are `~typing.Generic` objects and support a `!Row` parameter which is the type of the records returned. The parameter can be configured by passing a `!row_factory` parameter to the constructor or to the `~Connection.cursor()` method. By default, methods producing records such as `Cursor.fetchall()` return normal tuples of unknown size and content. As such, the `connect()` function -returns an object of type `!psycopg.Connection[tuple[Any, ...]]` and -`Connection.cursor()` returns an object of type `!psycopg.Cursor[tuple[Any, +returns an object of type `!gaussdb.Connection[tuple[Any, ...]]` and +`Connection.cursor()` returns an object of type `!gaussdb.Cursor[tuple[Any, ...]]`. If you are writing generic plumbing code it might be practical to use annotations such as `!Connection[Any]` and `!Cursor[Any]`. .. code:: python - conn = psycopg.connect() # type is psycopg.Connection[tuple[Any, ...]] + conn = gaussdb.connect() # type is gaussdb.Connection[tuple[Any, ...]] - cur = conn.cursor() # type is psycopg.Cursor[tuple[Any, ...]] + cur = conn.cursor() # type is gaussdb.Cursor[tuple[Any, ...]] rec = cur.fetchone() # type is tuple[Any, ...] | None @@ -55,12 +55,12 @@ cursors and annotate the returned objects accordingly. See .. code:: python - dconn = psycopg.connect(row_factory=dict_row) - # dconn type is psycopg.Connection[dict[str, Any]] + dconn = gaussdb.connect(row_factory=dict_row) + # dconn type is gaussdb.Connection[dict[str, Any]] dcur = conn.cursor(row_factory=dict_row) dcur = dconn.cursor() - # dcur type is psycopg.Cursor[dict[str, Any]] in both cases + # dcur type is gaussdb.Cursor[dict[str, Any]] in both cases drec = dcur.fetchone() # drec type is dict[str, Any] | None @@ -73,8 +73,8 @@ Generic pool types .. versionadded:: 3.2 -The `~psycopg_pool.ConnectionPool` class and similar are generic on their -`!connection_class` argument. The `~psycopg_pool.ConnectionPool.connection()` +The `~gaussdb_pool.ConnectionPool` class and similar are generic on their +`!connection_class` argument. The `~gaussdb_pool.ConnectionPool.connection()` method is annotated as returning a connection of that type, and the record returned will follow the rule as in :ref:`row-factory-static`. @@ -84,8 +84,8 @@ otherwise the typing system and the runtime will not agree. .. code:: python - from psycopg import Connection - from psycopg.rows import DictRow, dict_row + from gaussdb import Connection + from gaussdb.rows import DictRow, dict_row with ConnectionPool( connection_class=Connection[DictRow], # provides type hinting @@ -144,8 +144,8 @@ any issue. Pydantic will also raise a runtime error in case the from datetime import date from typing import Optional - import psycopg - from psycopg.rows import class_row + import gaussdb + from gaussdb.rows import class_row from pydantic import BaseModel class Person(BaseModel): @@ -155,7 +155,7 @@ any issue. Pydantic will also raise a runtime error in case the dob: Optional[date] def fetch_person(id: int) -> Person: - with psycopg.connect() as conn: + with gaussdb.connect() as conn: with conn.cursor(row_factory=class_row(Person)) as cur: cur.execute( """ @@ -202,13 +202,13 @@ argument to `!execute()`, not by string composition: .. 
code:: python - def get_record(conn: psycopg.Connection[Any], id: int) -> Any: + def get_record(conn: gaussdb.Connection[Any], id: int) -> Any: cur = conn.execute("SELECT * FROM my_table WHERE id = %s" % id) # BAD! return cur.fetchone() # the function should be implemented as: - def get_record(conn: psycopg.Connection[Any], id: int) -> Any: + def get_record(conn: gaussdb.Connection[Any], id: int) -> Any: cur = conn.execute("select * FROM my_table WHERE id = %s", (id,)) return cur.fetchone() @@ -218,18 +218,18 @@ and similar to escape safely table and field names. The parameter of the .. code:: python - def count_records(conn: psycopg.Connection[Any], table: str) -> int: + def count_records(conn: gaussdb.Connection[Any], table: str) -> int: query = "SELECT count(*) FROM %s" % table # BAD! return conn.execute(query).fetchone()[0] # the function should be implemented as: - def count_records(conn: psycopg.Connection[Any], table: str) -> int: + def count_records(conn: gaussdb.Connection[Any], table: str) -> int: query = sql.SQL("SELECT count(*) FROM {}").format(sql.Identifier(table)) return conn.execute(query).fetchone()[0] At the time of writing, no Python static analyzer implements this check (`mypy -doesn't implement it`__, Pyre_ does, but `doesn't work with psycopg yet`__). +doesn't implement it`__, Pyre_ does, but `doesn't work with gaussdb yet`__). Once type checker support is complete, the above bad statements should be reported as errors. diff --git a/docs/api/abc.rst b/docs/api/abc.rst index 90a1dd034..dd6eebbd9 100644 --- a/docs/api/abc.rst +++ b/docs/api/abc.rst @@ -1,22 +1,22 @@ -`!abc` -- Psycopg abstract classes +`!abc` -- GaussDB abstract classes ================================== -The module exposes Psycopg definitions which can be used for static type +The module exposes GaussDB definitions which can be used for static type checking. -.. module:: psycopg.abc +.. module:: gaussdb.abc .. seealso:: :ref:`adapt-life-cycle` for more information about how these objects - are used by Psycopg, + are used by GaussDB, .. autoclass:: Dumper(cls, context=None) This class is a formal `~typing.Protocol`. A partial implementation of this protocol (implementing everything except the `dump()` method) is - available as `psycopg.adapt.Dumper`. + available as `gaussdb.adapt.Dumper`. :param cls: The type that will be managed by this dumper. :type cls: type @@ -41,7 +41,7 @@ checking. .. tip:: - This method will be used by `~psycopg.sql.Literal` to convert a + This method will be used by `~gaussdb.sql.Literal` to convert a value client-side. This method only makes sense for text dumpers; the result of calling @@ -54,9 +54,9 @@ checking. from the context, but this may fail in some contexts and may require a cast (e.g. specifying :samp:`%s::{type}` for its placeholder). - You can use the `psycopg.adapters`\ ``.``\ - `~psycopg.adapt.AdaptersMap.types` registry to find the OID of builtin - types, and you can use `~psycopg.types.TypeInfo` to extend the + You can use the `gaussdb.adapters`\ ``.``\ + `~gaussdb.adapt.AdaptersMap.types` registry to find the OID of builtin + types, and you can use `~gaussdb.types.TypeInfo` to extend the registry to custom types. .. automethod:: get_key @@ -67,7 +67,7 @@ checking. This class is a formal `~typing.Protocol`. A partial implementation of this protocol (implementing everything except the `load()` method) is available - as `psycopg.adapt.Loader`. + as `gaussdb.adapt.Loader`. :param oid: The type that will be managed by this loader.
:type oid: int diff --git a/docs/api/adapt.rst b/docs/api/adapt.rst index db3b2b540..fdc1eec14 100644 --- a/docs/api/adapt.rst +++ b/docs/api/adapt.rst @@ -1,18 +1,18 @@ `adapt` -- Types adaptation =========================== -.. module:: psycopg.adapt +.. module:: gaussdb.adapt -The `!psycopg.adapt` module exposes a set of objects useful for the +The `!gaussdb.adapt` module exposes a set of objects useful for the configuration of *data adaptation*, which is the conversion of Python objects to PostgreSQL data types and back. These objects are useful if you need to configure data adaptation, i.e. -if you need to change the default way that Psycopg converts between types or +if you need to change the default way that GaussDB converts between types or if you want to adapt custom data types and objects. You don't need this object -in the normal use of Psycopg. +in the normal use of GaussDB. -See :ref:`adaptation` for an overview of the Psycopg adaptation system. +See :ref:`adaptation` for an overview of the GaussDB adaptation system. .. _abstract base class: https://docs.python.org/glossary.html#term-abstract-base-class @@ -23,7 +23,7 @@ Dumpers and loaders .. autoclass:: Dumper(cls, context=None) This is an `abstract base class`_, partially implementing the - `~psycopg.abc.Dumper` protocol. Subclasses *must* at least implement the + `~gaussdb.abc.Dumper` protocol. Subclasses *must* at least implement the `.dump()` method and optionally override other members. .. automethod:: dump @@ -34,10 +34,10 @@ Dumpers and loaders the database. .. attribute:: format - :type: psycopg.pq.Format + :type: gaussdb.pq.Format :value: TEXT - Class attribute. Set it to `~psycopg.pq.Format.BINARY` if the class + Class attribute. Set it to `~gaussdb.pq.Format.BINARY` if the class `dump()` methods converts the object to binary format. .. automethod:: quote @@ -50,16 +50,16 @@ Dumpers and loaders .. autoclass:: Loader(oid, context=None) This is an `abstract base class`_, partially implementing the - `~psycopg.abc.Loader` protocol. Subclasses *must* at least implement the + `~gaussdb.abc.Loader` protocol. Subclasses *must* at least implement the `.load()` method and optionally override other members. .. automethod:: load .. attribute:: format - :type: psycopg.pq.Format + :type: gaussdb.pq.Format :value: TEXT - Class attribute. Set it to `~psycopg.pq.Format.BINARY` if the class + Class attribute. Set it to `~gaussdb.pq.Format.BINARY` if the class `load()` methods converts the object from binary format. @@ -83,7 +83,7 @@ Other objects used in adaptations The object where to look up for types information (such as the mapping between type names and oids in the specified context). - :type: `~psycopg.types.TypesRegistry` + :type: `~gaussdb.types.TypesRegistry` .. automethod:: get_dumper .. automethod:: get_dumper_by_oid @@ -93,4 +93,4 @@ Other objects used in adaptations .. autoclass:: Transformer(context=None) :param context: The context where the transformer should operate. - :type context: `~psycopg.abc.AdaptContext` + :type context: `~gaussdb.abc.AdaptContext` diff --git a/docs/api/connections.rst b/docs/api/connections.rst index 4dfaec271..b4c2846dd 100644 --- a/docs/api/connections.rst +++ b/docs/api/connections.rst @@ -1,4 +1,4 @@ -.. currentmodule:: psycopg +.. currentmodule:: gaussdb Connection classes ================== @@ -38,24 +38,24 @@ The `!Connection` class :param autocommit: If `!True` don't start transactions automatically. See :ref:`transactions` for details. 
:param row_factory: The row factory specifying what type of records - to create fetching data (default: `~psycopg.rows.tuple_row()`). See + to create when fetching data (default: `~gaussdb.rows.tuple_row()`). See :ref:`row-factories` for details. :param cursor_factory: Initial value for the `cursor_factory` attribute - of the connection (new in Psycopg 3.1). + of the connection (new in GaussDB 3.1). :param prepare_threshold: Initial value for the `prepare_threshold` - attribute of the connection (new in Psycopg 3.1). + attribute of the connection (new in GaussDB 3.1). More specialized use: :param context: A context to copy the initial adapters configuration - from. It might be an `~psycopg.adapt.AdaptersMap` with customized + from. It might be an `~gaussdb.adapt.AdaptersMap` with customized loaders and dumpers, used as a template to create several connections. See :ref:`adaptation` for further details. .. __: https://www.postgresql.org/docs/current/libpq-connect.html #LIBPQ-CONNSTRING - This method is also aliased as `psycopg.connect()`. + This method is also aliased as `gaussdb.connect()`. .. seealso:: @@ -74,7 +74,7 @@ The `!Connection` class You can use:: - with psycopg.connect() as conn: + with gaussdb.connect() as conn: ... to close the connection automatically when the block is exited. @@ -120,21 +120,21 @@ The `!Connection` class The type, or factory function, returned by `cursor()` and `execute()`. - Default is `psycopg.Cursor`. + Default is `gaussdb.Cursor`. .. autoattribute:: server_cursor_factory The type, or factory function, returned by `cursor()` when a name is specified. - Default is `psycopg.ServerCursor`. + Default is `gaussdb.ServerCursor`. .. autoattribute:: row_factory The row factory defining the type of rows returned by `~Cursor.fetchone()` and the other cursor fetch methods. - The default is `~psycopg.rows.tuple_row`, which means that the fetch + The default is `~gaussdb.rows.tuple_row`, which means that the fetch methods will return simple tuples. .. seealso:: See :ref:`row-factories` for details about defining the @@ -255,12 +255,12 @@ The `!Connection` class .. rubric:: Checking and configuring the connection state .. attribute:: pgconn - :type: psycopg.pq.PGconn + :type: gaussdb.pq.PGconn The `~pq.PGconn` libpq connection wrapper underlying the `!Connection`. It can be used to send low level commands to PostgreSQL and access - features not currently wrapped by Psycopg. + features not currently wrapped by GaussDB. .. autoattribute:: info @@ -450,10 +450,10 @@ The `!Connection` class Returns a list of `Xid` representing pending transactions, suitable for use with `tpc_commit()` or `tpc_rollback()`. - If a transaction was not initiated by Psycopg, the returned Xids will + If a transaction was not initiated by GaussDB, the returned Xids will have attributes `~Xid.format_id` and `~Xid.bqual` set to `!None` and the `~Xid.gtrid` set to the PostgreSQL transaction ID: such Xids are - still usable for recovery. Psycopg uses the same algorithm of the + still usable for recovery. GaussDB uses the same algorithm as the `PostgreSQL JDBC driver`__ to encode an XA triple in a string, so transactions initiated by a program using such driver should be unpacked correctly. @@ -488,7 +488,7 @@ The `!AsyncConnection` class Automatically resolve domain names asynchronously.
In previous versions, name resolution blocks, unless the `!hostaddr` - parameter is specified, or the `~psycopg._dns.resolve_hostaddr_async()` + parameter is specified, or the `~gaussdb._dns.resolve_hostaddr_async()` function is used. .. automethod:: close @@ -515,11 +515,11 @@ The `!AsyncConnection` class .. autoattribute:: cursor_factory - Default is `psycopg.AsyncCursor`. + Default is `gaussdb.AsyncCursor`. .. autoattribute:: server_cursor_factory - Default is `psycopg.AsyncServerCursor`. + Default is `gaussdb.AsyncServerCursor`. .. autoattribute:: row_factory diff --git a/docs/api/conninfo.rst b/docs/api/conninfo.rst index 9e5b01da2..eeacc2d7d 100644 --- a/docs/api/conninfo.rst +++ b/docs/api/conninfo.rst @@ -1,4 +1,4 @@ -.. _psycopg.conninfo: +.. _gaussdb.conninfo: `conninfo` -- manipulate connection strings =========================================== @@ -6,7 +6,7 @@ This module contains a few utility functions to manipulate database connection strings. -.. module:: psycopg.conninfo +.. module:: gaussdb.conninfo .. autofunction:: conninfo_to_dict diff --git a/docs/api/copy.rst b/docs/api/copy.rst index 81a96e2f2..9f46b0698 100644 --- a/docs/api/copy.rst +++ b/docs/api/copy.rst @@ -1,4 +1,4 @@ -.. currentmodule:: psycopg +.. currentmodule:: gaussdb COPY-related objects ==================== @@ -7,7 +7,7 @@ The main objects (`Copy`, `AsyncCopy`) present the main interface to exchange data during a COPY operation. These objects are normally obtained by the methods `Cursor.copy()` and `AsyncCursor.copy()`; however, they can also be created directly, for instance to write to a destination which is not a -database (e.g. using a `~psycopg.copy.FileWriter`). +database (e.g. using a `~gaussdb.copy.FileWriter`). See :ref:`copy` for details. @@ -63,7 +63,7 @@ Main Copy objects Writer objects -------------- -.. currentmodule:: psycopg.copy +.. currentmodule:: gaussdb.copy .. versionadded:: 3.1 @@ -74,7 +74,7 @@ customize further writing by implementing your own `Writer` or `AsyncWriter` subclass. Writer instances can be used by passing them to the cursor -`~psycopg.Cursor.copy()` method or to the `~psycopg.Copy` constructor, as the +`~gaussdb.Cursor.copy()` method or to the `~gaussdb.Copy` constructor, as the `!writer` argument. .. autoclass:: Writer diff --git a/docs/api/crdb.rst b/docs/api/crdb.rst index de8344ec4..6c0d99034 100644 --- a/docs/api/crdb.rst +++ b/docs/api/crdb.rst @@ -1,19 +1,19 @@ `crdb` -- CockroachDB support ============================= -.. module:: psycopg.crdb +.. module:: gaussdb.crdb .. versionadded:: 3.1 CockroachDB_ is a distributed database using the same frontend-backend protocol -of PostgreSQL. As such, Psycopg can be used to write Python programs +of PostgreSQL. As such, GaussDB can be used to write Python programs interacting with CockroachDB. .. _CockroachDB: https://www.cockroachlabs.com/ -Opening a connection to a CRDB database using `psycopg.connect()` provides a -largely working object. However, using the `psycopg.crdb.connect()` function -instead, Psycopg will create more specialised objects and provide a types +Opening a connection to a CRDB database using `gaussdb.connect()` provides a +largely working object. However, using the `gaussdb.crdb.connect()` function +instead, GaussDB will create more specialised objects and provide a types mapping tweaked on the CockroachDB data model. @@ -24,17 +24,17 @@ Main differences from PostgreSQL CockroachDB behaviour is `different from PostgreSQL`__: please refer to the database documentation for details.
These are some of the main differences -affecting Psycopg behaviour: +affecting GaussDB behaviour: .. __: https://www.cockroachlabs.com/docs/stable/postgresql-compatibility.html -- `~psycopg.Connection.cancel()` doesn't work before CockroachDB 22.1. On +- `~gaussdb.Connection.cancel()` doesn't work before CockroachDB 22.1. On older versions, you can use `CANCEL QUERY`_ instead (but from a different connection). - :ref:`server-side-cursors` are well supported only from CockroachDB 22.1.3. -- `~psycopg.ConnectionInfo.backend_pid` is only populated from CockroachDB +- `~gaussdb.ConnectionInfo.backend_pid` is only populated from CockroachDB 22.1. Note however that you cannot use the PID to terminate the session; use `SHOW session_id`_ to find the id of a session, which you may terminate with `CANCEL SESSION`_ in lieu of PostgreSQL's :sql:`pg_terminate_backend()`. @@ -45,7 +45,7 @@ affecting Psycopg behaviour: - The :ref:`two-phase commit protocol ` is not supported. - :sql:`LISTEN` and :sql:`NOTIFY` are not supported. However the `CHANGEFEED`_ - command, in conjunction with `~psycopg.Cursor.stream()`, can provide push + command, in conjunction with `~gaussdb.Cursor.stream()`, can provide push notifications. .. _CANCEL QUERY: https://www.cockroachlabs.com/docs/stable/cancel-query.html @@ -69,22 +69,22 @@ CockroachDB-specific objects .. autoclass:: CrdbConnection - `psycopg.Connection` subclass. + `gaussdb.Connection` subclass. .. automethod:: is_crdb :param conn: the connection to check - :type conn: `~psycopg.Connection`, `~psycopg.AsyncConnection`, `~psycopg.pq.PGconn` + :type conn: `~gaussdb.Connection`, `~gaussdb.AsyncConnection`, `~gaussdb.pq.PGconn` .. autoclass:: AsyncCrdbConnection - `psycopg.AsyncConnection` subclass. + `gaussdb.AsyncConnection` subclass. .. autoclass:: CrdbConnectionInfo - The object is returned by the `~psycopg.Connection.info` attribute of + The object is returned by the `~gaussdb.Connection.info` attribute of `CrdbConnection` and `AsyncCrdbConnection`. The object behaves like `!ConnectionInfo`, with the following differences: @@ -102,7 +102,7 @@ CockroachDB-specific objects converted into each other. The map is used as a template when new connections are created, using - `psycopg.crdb.connect()` (similarly to the way `psycopg.adapters` is used + `gaussdb.crdb.connect()` (similarly to the way `gaussdb.adapters` is used as a template for new PostgreSQL connections). This registry contains only the types and adapters supported by diff --git a/docs/api/cursors.rst b/docs/api/cursors.rst index 64af11e4f..0449b82cd 100644 --- a/docs/api/cursors.rst +++ b/docs/api/cursors.rst @@ -1,4 +1,4 @@ -.. currentmodule:: psycopg +.. currentmodule:: gaussdb Cursor classes ============== @@ -16,7 +16,7 @@ set as `Connection.cursor_factory` to require them on `!cursor()` call. This page describes the details of the `!Cursor` class interface. Please refer to :ref:`cursor-types` for general information about the different types of -cursors available in Psycopg. +cursors available in GaussDB. The `!Cursor` class @@ -192,7 +192,7 @@ The `!Cursor` class .. warning:: Failing to consume the iterator entirely will result in a - connection left in `~psycopg.ConnectionInfo.transaction_status` + connection left in `~gaussdb.ConnectionInfo.transaction_status` `~pq.TransactionStatus.ACTIVE` state: this connection will refuse to receive further commands (with a message such as *another command is already in progress*).
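To make the warning above concrete, a `!stream()` loop should be consumed to completion (or the cursor closed) before the connection is reused. A minimal sketch, assuming `gaussdb` keeps the psycopg 3 `Cursor.stream()` API; the DSN is illustrative:

.. code:: python

    import gaussdb

    with gaussdb.connect("dbname=test") as conn:
        with conn.cursor() as cur:
            total = 0
            # stream() yields one row at a time instead of buffering the
            # whole result set client-side; consuming it entirely takes the
            # connection out of the ACTIVE state.
            for (n,) in cur.stream("SELECT generate_series(1, 1000)"):
                total += n
            print(total)  # 500500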
@@ -250,7 +250,7 @@ The `!Cursor` class The property affects the objects returned by the `fetchone()`, `fetchmany()`, `fetchall()` methods. The default - (`~psycopg.rows.tuple_row`) returns a tuple for each record fetched. + (`~gaussdb.rows.tuple_row`) returns a tuple for each record fetched. See :ref:`row-factories` for details. @@ -261,13 +261,13 @@ The `!Cursor` class .. automethod:: scroll .. attribute:: pgresult - :type: Optional[psycopg.pq.PGresult] + :type: Optional[gaussdb.pq.PGresult] The result returned by the last query and currently exposed by the cursor, if available, else `!None`. It can be used to obtain low level info about the last query result - and to access to features not currently wrapped by Psycopg. + and to access to features not currently wrapped by GaussDB. .. rubric:: Information about the data diff --git a/docs/api/dns.rst b/docs/api/dns.rst index b109c2716..76ada6501 100644 --- a/docs/api/dns.rst +++ b/docs/api/dns.rst @@ -1,7 +1,7 @@ `_dns` -- DNS resolution utilities ================================== -.. module:: psycopg._dns +.. module:: gaussdb._dns This module contains a few experimental utilities to interact with the DNS server before performing a connection. @@ -13,7 +13,7 @@ server before performing a connection. .. warning:: This module depends on the `dnspython`_ package. The package is currently - not installed automatically as a Psycopg dependency and must be installed + not installed automatically as a GaussDB dependency and must be installed manually: .. code:: sh @@ -28,7 +28,7 @@ server before performing a connection. Apply SRV DNS lookup as defined in :RFC:`2782`. :param params: The input parameters, for instance as returned by - `~psycopg.conninfo.conninfo_to_dict()`. + `~gaussdb.conninfo.conninfo_to_dict()`. :type params: `!dict` :return: An updated list of connection parameters. @@ -37,7 +37,7 @@ server before performing a connection. If lookup is successful, return a params dict with hosts and ports replaced with the looked-up entries. - Raise `~psycopg.OperationalError` if no lookup is successful and no host + Raise `~gaussdb.OperationalError` if no lookup is successful and no host (looked up or unchanged) could be returned. In addition to the rules defined by RFC 2782 about the host name pattern, @@ -49,20 +49,20 @@ server before performing a connection. .. note:: One possible way to use this function automatically is to subclass - `~psycopg.Connection`, extending the - `~psycopg.Connection._get_connection_params()` method:: + `~gaussdb.Connection`, extending the + `~gaussdb.Connection._get_connection_params()` method:: - import psycopg._dns # not imported automatically + import gaussdb._dns # not imported automatically - class SrvCognizantConnection(psycopg.Connection): + class SrvCognizantConnection(gaussdb.Connection): @classmethod def _get_connection_params(cls, conninfo, **kwargs): params = super()._get_connection_params(conninfo, **kwargs) - params = psycopg._dns.resolve_srv(params) + params = gaussdb._dns.resolve_srv(params) return params # The name will be resolved to db1.example.com - cnn = SrvCognizantConnection.connect("host=_postgres._tcp.db.psycopg.org") + cnn = SrvCognizantConnection.connect("host=_postgres._tcp.db.gaussdb.org") .. function:: resolve_srv_async(params) @@ -71,7 +71,7 @@ server before performing a connection. Async equivalent of `resolve_srv()`. -.. automethod:: psycopg.Connection._get_connection_params +.. automethod:: gaussdb.Connection._get_connection_params .. warning:: This is an experimental method. 
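As a complement to the subclassing recipe above, the resolver can also be called directly on a parameters dict. A minimal sketch, assuming `gaussdb._dns` keeps the experimental psycopg `!_dns` API (the `dnspython` package must be installed separately; the host name is illustrative):

.. code:: python

    import gaussdb._dns  # not imported automatically
    from gaussdb.conninfo import conninfo_to_dict, make_conninfo

    params = conninfo_to_dict("host=_postgres._tcp.db.example.com dbname=test")
    # Replace host and port with the entries found in the SRV records;
    # raises OperationalError if no usable host can be returned.
    params = gaussdb._dns.resolve_srv(params)
    print(make_conninfo(**params))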
@@ -87,7 +87,7 @@ server before performing a connection. return params -.. automethod:: psycopg.AsyncConnection._get_connection_params +.. automethod:: gaussdb.AsyncConnection._get_connection_params .. warning:: This is an experimental method. @@ -100,11 +100,11 @@ server before performing a connection. .. deprecated:: 3.1 The use of this function is not necessary anymore, because - `psycopg.AsyncConnection.connect()` performs non-blocking name + `gaussdb.AsyncConnection.connect()` performs non-blocking name resolution automatically. :param params: The input parameters, for instance as returned by - `~psycopg.conninfo.conninfo_to_dict()`. + `~gaussdb.conninfo.conninfo_to_dict()`. :type params: `!dict` If a ``host`` param is present but not ``hostname``, resolve the host @@ -114,7 +114,7 @@ server before performing a connection. connecting without further DNS lookups, eventually removing hosts that are not resolved, keeping the lists of hosts and ports consistent. - Raise `~psycopg.OperationalError` if connection is not possible (e.g. no + Raise `~gaussdb.OperationalError` if connection is not possible (e.g. no host resolve, inconsistent lists length). See `the PostgreSQL docs`__ for explanation of how these params are used, @@ -124,22 +124,22 @@ server before performing a connection. #LIBPQ-PARAMKEYWORDS .. warning:: - Before psycopg 3.1, this function doesn't handle the ``/etc/hosts`` file. + Before gaussdb 3.1, this function doesn't handle the ``/etc/hosts`` file. .. note:: - Starting from psycopg 3.1, a similar operation is performed + Starting from gaussdb 3.1, a similar operation is performed automatically by `!AsyncConnection._get_connection_params()`, so this function is unneeded. - In psycopg 3.0, one possible way to use this function automatically is - to subclass `~psycopg.AsyncConnection`, extending the - `~psycopg.AsyncConnection._get_connection_params()` method:: + In gaussdb 3.0, one possible way to use this function automatically is + to subclass `~gaussdb.AsyncConnection`, extending the + `~gaussdb.AsyncConnection._get_connection_params()` method:: - import psycopg._dns # not imported automatically + import gaussdb._dns # not imported automatically - class AsyncDnsConnection(psycopg.AsyncConnection): + class AsyncDnsConnection(gaussdb.AsyncConnection): @classmethod async def _get_connection_params(cls, conninfo, **kwargs): params = await super()._get_connection_params(conninfo, **kwargs) - params = await psycopg._dns.resolve_hostaddr_async(params) + params = await gaussdb._dns.resolve_hostaddr_async(params) return params diff --git a/docs/api/errors.rst b/docs/api/errors.rst index 10301867f..e5e84b89b 100644 --- a/docs/api/errors.rst +++ b/docs/api/errors.rst @@ -1,7 +1,7 @@ `errors` -- Package exceptions ============================== -.. module:: psycopg.errors +.. module:: gaussdb.errors .. index:: single: Error; Class @@ -9,7 +9,7 @@ This module exposes objects to represent and examine database errors. -.. currentmodule:: psycopg +.. currentmodule:: gaussdb .. index:: single: Exceptions; DB-API @@ -19,7 +19,7 @@ This module exposes objects to represent and examine database errors. DB-API exceptions ----------------- -In compliance with the DB-API, all the exceptions raised by Psycopg +In compliance with the DB-API, all the exceptions raised by GaussDB derive from the following classes: .. 
parsed-literal:: @@ -36,7 +36,7 @@ derive from the following classes: \|__ `ProgrammingError` \|__ `NotSupportedError` -These classes are exposed both by this module and the root `psycopg` module. +These classes are exposed both by this module and the root `gaussdb` module. .. autoexception:: Error() @@ -50,10 +50,10 @@ These classes are exposed both by this module and the root `psycopg` module. .. autoattribute:: pgconn - It has been closed and will be in `~psycopg.pq.ConnStatus.BAD` state; + It has been closed and will be in `~gaussdb.pq.ConnStatus.BAD` state; however it might be useful to verify precisely what went wrong, for - instance checking the `~psycopg.pq.PGconn.needs_password` and - `~psycopg.pq.PGconn.used_password` attributes. + instance checking the `~gaussdb.pq.PGconn.needs_password` and + `~gaussdb.pq.PGconn.used_password` attributes. Attempting to operate this connection will raise an :exc:`OperationalError`. @@ -75,13 +75,13 @@ These classes are exposed both by this module and the root `psycopg` module. .. autoexception:: NotSupportedError() -Other Psycopg errors +Other GaussDB errors ^^^^^^^^^^^^^^^^^^^^ -.. currentmodule:: psycopg.errors +.. currentmodule:: gaussdb.errors -In addition to the standard DB-API errors, Psycopg defines a few more specific +In addition to the standard DB-API errors, GaussDB defines a few more specific ones. .. autoexception:: ConnectionTimeout() @@ -98,9 +98,9 @@ Error diagnostics .. autoclass:: Diagnostic() - The object is available as the `~psycopg.Error`.\ `~psycopg.Error.diag` + The object is available as the `~gaussdb.Error`.\ `~gaussdb.Error.diag` attribute and is passed to the callback functions registered with - `~psycopg.Connection.add_notice_handler()`. + `~gaussdb.Connection.add_notice_handler()`. All the information available from the :pq:`PQresultErrorField()` function are exposed as attributes by the object. For instance the `!severity` @@ -143,9 +143,9 @@ SQLSTATE exceptions Errors coming from a database server (as opposite as ones generated client-side, such as connection failed) usually have a 5-letters error code called SQLSTATE (available in the `~Diagnostic.sqlstate` attribute of the -error's `~psycopg.Error.diag` attribute). +error's `~gaussdb.Error.diag` attribute). -Psycopg exposes a different class for each SQLSTATE value, allowing to +GaussDB exposes a different class for each SQLSTATE value, allowing to write idiomatic error handling code according to specific conditions happening in the database: @@ -153,7 +153,7 @@ in the database: try: cur.execute("LOCK TABLE mytable IN ACCESS EXCLUSIVE MODE NOWAIT") - except psycopg.errors.LockNotAvailable: + except gaussdb.errors.LockNotAvailable: locked = True The exception names are generated from the PostgreSQL source code and includes @@ -168,7 +168,7 @@ the classes defined. .. __: https://www.postgresql.org/docs/current/errcodes-appendix.html#ERRCODES-TABLE Every exception class is a subclass of one of the :ref:`standard DB-API -exception `, thus exposing the `~psycopg.Error` interface. +exception `, thus exposing the `~gaussdb.Error` interface. .. versionchanged:: 3.1.4 Added exceptions introduced in PostgreSQL 15. @@ -182,9 +182,9 @@ exception `, thus exposing the `~psycopg.Error` interface. 
try: cur.execute("LOCK TABLE mytable IN ACCESS EXCLUSIVE MODE NOWAIT") - except psycopg.errors.lookup("UNDEFINED_TABLE"): + except gaussdb.errors.lookup("UNDEFINED_TABLE"): missing = True - except psycopg.errors.lookup("55P03"): + except gaussdb.errors.lookup("55P03"): locked = True diff --git a/docs/api/index.rst b/docs/api/index.rst index b99550d89..4a87532df 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -1,10 +1,10 @@ -Psycopg 3 API +gaussdb API ============= .. _api: This sections is a reference for all the public objects exposed by the -`psycopg` module. For a more conceptual description you can take a look at +`gaussdb` module. For a more conceptual description you can take a look at :ref:`basic` and :ref:`advanced`. .. toctree:: diff --git a/docs/api/module.rst b/docs/api/module.rst index a8c1625af..645b84261 100644 --- a/docs/api/module.rst +++ b/docs/api/module.rst @@ -1,13 +1,13 @@ -The `!psycopg` module +The `!gaussdb` module ===================== -Psycopg implements the `Python Database DB API 2.0 specification`__. As such +GaussDB implements the `Python Database DB API 2.0 specification`__. As such it also exposes the `module-level objects`__ required by the specifications. .. __: https://www.python.org/dev/peps/pep-0249/ .. __: https://www.python.org/dev/peps/pep-0249/#module-interface -.. module:: psycopg +.. module:: gaussdb .. autofunction:: connect @@ -20,14 +20,14 @@ it also exposes the `module-level objects`__ required by the specifications. .. data:: capabilities An object that can be used to verify that the client library used by - psycopg implements a certain feature. For instance:: + gaussdb implements a certain feature. For instance:: # Fail at import time if encrypted passwords is not available - import psycopg - psycopg.capabilities.has_encrypt_password(check=True) + import gaussdb + gaussdb.capabilities.has_encrypt_password(check=True) # Verify at runtime if a feature can be used - if psycopg.capabilities.has_hostaddr(): + if gaussdb.capabilities.has_hostaddr(): print(conn.info.hostaddr) else: print("unknown connection hostadd") @@ -39,8 +39,8 @@ it also exposes the `module-level objects`__ required by the specifications. .. rubric:: Exceptions -The standard `DBAPI exceptions`__ are exposed both by the `!psycopg` module -and by the `psycopg.errors` module. The latter also exposes more specific +The standard `DBAPI exceptions`__ are exposed both by the `!gaussdb` module +and by the `gaussdb.errors` module. The latter also exposes more specific exceptions, mapping to the database error states (see :ref:`sqlstate-exceptions`). @@ -67,12 +67,12 @@ exceptions, mapping to the database error states (see converted into each other. This map is used as a template when new connections are created, using - `psycopg.connect()`. Its `~psycopg.adapt.AdaptersMap.types` attribute is a - `~psycopg.types.TypesRegistry` containing information about every + `gaussdb.connect()`. Its `~gaussdb.adapt.AdaptersMap.types` attribute is a + `~gaussdb.types.TypesRegistry` containing information about every PostgreSQL builtin type, useful for adaptation customisation (see :ref:`adaptation`):: - >>> psycopg.adapters.types["int4"] + >>> gaussdb.adapters.types["int4"] - :type: `~psycopg.adapt.AdaptersMap` + :type: `~gaussdb.adapt.AdaptersMap` diff --git a/docs/api/objects.rst b/docs/api/objects.rst index 5f6e2902e..c45258be4 100644 --- a/docs/api/objects.rst +++ b/docs/api/objects.rst @@ -1,4 +1,4 @@ -.. currentmodule:: psycopg +.. 
currentmodule:: gaussdb Other top-level objects ======================= @@ -75,7 +75,7 @@ Connection information .. autoattribute:: hostaddr Only available if the libpq used is from PostgreSQL 12 or newer. - Raise `~psycopg.NotSupportedError` otherwise. You can use the + Raise `~gaussdb.NotSupportedError` otherwise. You can use the `~Capabilities.has_hostaddr` capability to check for support. .. autoattribute:: port @@ -118,13 +118,13 @@ Libpq capabilities information .. autoclass:: Capabilities An instance of this object is normally exposed by the module as the object - `psycopg.capabilities`. + `gaussdb.capabilities`. Every feature check is implemented by a `!has_SOMETHING()` method. All the methods return a boolean value stating if the capability is supported, which can be used by a program to degrade gracefully:: - if psycopg.capabilities.has_pipeline() + if gaussdb.capabilities.has_pipeline(): with conn.pipeline(): operations(conn) else: @@ -136,11 +136,11 @@ Libpq capabilities information feature is not supported. This allows making a check at import time, crashing early and with a clear description of the problem. - >>> import psycopg - >>> psycopg.capabilities.has_pipeline(check=True) + >>> import gaussdb + >>> gaussdb.capabilities.has_pipeline(check=True) Traceback (most recent call last): ... - psycopg.NotSupportedError: the feature 'Connection.pipeline()' is not available: + gaussdb.NotSupportedError: the feature 'Connection.pipeline()' is not available: the client libpq version (imported from system libraries) is 13.4; the feature requires libpq version 14.0 or newer diff --git a/docs/api/pool.rst b/docs/api/pool.rst index 76aed2b7d..a5b1c0511 100644 --- a/docs/api/pool.rst +++ b/docs/api/pool.rst @@ -1,10 +1,10 @@ -`!psycopg_pool` -- Connection pool implementations +`!gaussdb_pool` -- Connection pool implementations ================================================== .. index:: double: Connection; Pool -.. module:: psycopg_pool +.. module:: gaussdb_pool A connection pool is an object used to create and maintain a limited amount of PostgreSQL connections, reducing the time requested by the program to obtain a @@ -15,11 +15,11 @@ threads or tasks to use a controlled amount of resources on the server. See This package exposes a few connection pool classes: - `ConnectionPool` is a synchronous connection pool yielding - `~psycopg.Connection` objects and can be used by multithread applications. + `~gaussdb.Connection` objects and can be used by multithread applications. - `AsyncConnectionPool` has an interface similar to `!ConnectionPool`, but with `asyncio` functions replacing blocking functions, and yields - `~psycopg.AsyncConnection` instances. + `~gaussdb.AsyncConnection` instances. - `NullConnectionPool` is a `!ConnectionPool` subclass exposing the same interface of its parent, but not keeping any unused connection in its state. @@ -29,12 +29,12 @@ This package exposes a few connection pool classes: `!NullConnectionPool`, but with the same async interface of the `!AsyncConnectionPool`. -.. note:: The `!psycopg_pool` package is distributed separately from the main - `psycopg` package: use ``pip install "psycopg[pool]"``, or ``pip install - psycopg_pool``, to make it available. See :ref:`pool-installation`. +.. note:: The `!gaussdb_pool` package is distributed separately from the main + `gaussdb` package: use ``pip install "gaussdb[pool]"``, or ``pip install + gaussdb_pool``, to make it available. See :ref:`pool-installation`.
- The version numbers indicated in this page refer to the `!psycopg_pool` - package, not to `psycopg`. + The version numbers indicated in this page refer to the `!gaussdb_pool` + package, not to `gaussdb`. The `!ConnectionPool` class @@ -42,19 +42,19 @@ The `!ConnectionPool` class .. autoclass:: ConnectionPool - This class implements a connection pool serving `~psycopg.Connection` + This class implements a connection pool serving `~gaussdb.Connection` instances (or subclasses). The constructor has *a lot* of arguments, but only `!conninfo` and `!min_size` are the fundamental ones; all the other arguments have meaningful defaults and can probably be tweaked later, if required. :param conninfo: The connection string. See - `~psycopg.Connection.connect()` for details. + `~gaussdb.Connection.connect()` for details. :type conninfo: `!str` :param connection_class: The class of the connections to serve. It should be a `!Connection` subclass. - :type connection_class: `!type`, default: `~psycopg.Connection` + :type connection_class: `!type`, default: `~gaussdb.Connection` :param kwargs: Extra arguments to pass to `!connect()`. Note that this is *one dict argument* of the pool constructor, which is @@ -178,7 +178,7 @@ The `!ConnectionPool` class the pool as context manager, you should specify the parameter `!open=True` explicitly. - Starting from psycopg_pool 3.2, a warning is raised if the pool is + Starting from gaussdb_pool 3.2, a warning is raised if the pool is used with the expectation of being implicitly opened in the constructor and `!open` is not specified. @@ -248,15 +248,15 @@ Pool exceptions .. autoclass:: PoolTimeout() - Subclass of `~psycopg.OperationalError` + Subclass of `~gaussdb.OperationalError` .. autoclass:: PoolClosed() - Subclass of `~psycopg.OperationalError` + Subclass of `~gaussdb.OperationalError` .. autoclass:: TooManyRequests() - Subclass of `~psycopg.OperationalError` + Subclass of `~gaussdb.OperationalError` The `!AsyncConnectionPool` class @@ -264,7 +264,7 @@ The `!AsyncConnectionPool` class `!AsyncConnectionPool` has a very similar interface to the `ConnectionPool` class but its blocking methods are implemented as `!async` coroutines. It -returns instances of `~psycopg.AsyncConnection`, or of its subclass if +returns instances of `~gaussdb.AsyncConnection`, or of its subclass if specified so in the `!connection_class` parameter. Only the functions and parameters with different signature from @@ -274,7 +274,7 @@ Only the functions and parameters with different signature from :param connection_class: The class of the connections to serve. It should be an `!AsyncConnection` subclass. - :type connection_class: `!type`, default: `~psycopg.AsyncConnection` + :type connection_class: `!type`, default: `~gaussdb.AsyncConnection` :param check: A callback to check that a connection is working correctly when obtained by the pool. @@ -317,7 +317,7 @@ Only the functions and parameters with different signature from async with AsyncConnectionPool(..., open=False) as pool: ... - Starting from psycopg_pool 3.2, opening an async pool in the + Starting from gaussdb_pool 3.2, opening an async pool in the constructor raises a warning. .. automethod:: connection diff --git a/docs/api/pq.rst b/docs/api/pq.rst index cb500eff7..f8cb04c21 100644 --- a/docs/api/pq.rst +++ b/docs/api/pq.rst @@ -1,4 +1,4 @@ -.. _psycopg.pq: +.. _gaussdb.pq: `pq` -- libpq wrapper module ============================ @@ -6,16 +6,16 @@ .. index:: single: libpq -.. module:: psycopg.pq +..
module:: gaussdb.pq -Psycopg is built around the libpq_, the PostgreSQL client library, which +GaussDB is built around the libpq_, the PostgreSQL client library, which performs most of the network communications and returns query results in C structures. .. _libpq: https://www.postgresql.org/docs/current/libpq.html The low-level functions of the library are exposed by the objects in the -`!psycopg.pq` module. +`!gaussdb.pq` module. .. _pq-impl: @@ -34,28 +34,28 @@ same interface. Current implementations are: in Cython_). It is much better performing than the ``python`` implementation, however it requires development packages installed on the client machine. It can be installed using the ``c`` extra, i.e. running - ``pip install "psycopg[c]"``. + ``pip install "gaussdb[c]"``. - ``binary``: a pre-compiled C implementation, bundled with all the required libraries. It is the easiest option to deal with, fast to install and it should require no development tool or client library, however it may be not available for every platform. You can install it using the ``binary`` extra, - i.e. running ``pip install "psycopg[binary]"``. + i.e. running ``pip install "gaussdb[binary]"``. .. _Cython: https://cython.org/ -The implementation currently used is available in the `~psycopg.pq.__impl__` +The implementation currently used is available in the `~gaussdb.pq.__impl__` module constant. -At import time, Psycopg 3 will try to use the best implementation available +At import time, GaussDB will try to use the best implementation available and will fail if none is usable. You can force the use of a specific -implementation by exporting the env var :envvar:`PSYCOPG_IMPL`: importing the +implementation by exporting the env var :envvar:`GAUSSDB_IMPL`: importing the library will fail if the requested implementation is not available:: - $ PSYCOPG_IMPL=c python -c "import psycopg" + $ GAUSSDB_IMPL=c python -c "import gaussdb" Traceback (most recent call last): ... - ImportError: couldn't import requested psycopg 'c' implementation: No module named 'psycopg_c' + ImportError: couldn't import requested gaussdb 'c' implementation: No module named 'gaussdb_c' Module content @@ -64,7 +64,7 @@ Module content .. autodata:: __impl__ The choice of implementation is automatic but can be forced setting the - :envvar:`PSYCOPG_IMPL` env var. + :envvar:`GAUSSDB_IMPL` env var. .. autofunction:: version .. automethod:: trace @@ -126,7 +126,7 @@ Objects wrapping libpq structures and functions B 39 DataRow 1 29 '2022-09-14 14:12:16.648035+02' B 13 CommandComplete "SELECT 1" B 5 ReadyForQuery T - + >>> conn.pgconn.untrace() diff --git a/docs/api/rows.rst b/docs/api/rows.rst index 0c9d1fb37..9d4b11227 100644 --- a/docs/api/rows.rst +++ b/docs/api/rows.rst @@ -1,11 +1,11 @@ -.. _psycopg.rows: +.. _gaussdb.rows: `rows` -- row factory implementations ===================================== -.. module:: psycopg.rows +.. module:: gaussdb.rows -The module exposes a few generic `~psycopg.RowFactory` implementation, which +The module exposes a few generic `~gaussdb.RowFactory` implementations, which can be used to retrieve data from the database in more complex structures than the basic tuples.
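Before the hunks below, which cover the ready-made factories, it may help to recall that a row factory is just a callable taking a cursor and returning a per-row converter. A minimal hand-written one, assuming `gaussdb` keeps the psycopg 3 row factory protocol; the DSN is illustrative:

.. code:: python

    import gaussdb

    def title_case_row(cursor):
        # Inspect the result description once per query...
        names = [c.name.title() for c in cursor.description]

        def make_row(values):
            # ...and build a dict for every fetched row.
            return dict(zip(names, values))

        return make_row

    conn = gaussdb.connect("dbname=test")
    cur = conn.cursor(row_factory=title_case_row)
    print(cur.execute("SELECT 'John' AS first_name").fetchone())
    # {'First_Name': 'John'}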
@@ -54,8 +54,8 @@ Check out :ref:`row-factory-create` for information about how to use these objec Example:: from dataclasses import dataclass - import psycopg - from psycopg.rows import class_row + import gaussdb + from gaussdb.rows import class_row @dataclass class Person: @@ -63,7 +63,7 @@ Check out :ref:`row-factory-create` for information about how to use these objec last_name: str age: int = None - conn = psycopg.connect() + conn = gaussdb.connect() cur = conn.cursor(row_factory=class_row(Person)) cur.execute("select 'John' as first_name, 'Smith' as last_name").fetchone() @@ -82,24 +82,24 @@ checks, such as mypy_. .. _mypy: https://mypy.readthedocs.io/ -.. autoclass:: psycopg.rows.RowMaker() +.. autoclass:: gaussdb.rows.RowMaker() .. method:: __call__(values: Sequence[Any]) -> Row Convert a sequence of values from the database to a finished object. -.. autoclass:: psycopg.rows.RowFactory() +.. autoclass:: gaussdb.rows.RowFactory() .. method:: __call__(cursor: Cursor[Row]) -> RowMaker[Row] Inspect the result on a cursor and return a `RowMaker` to convert rows. -.. autoclass:: psycopg.rows.AsyncRowFactory() +.. autoclass:: gaussdb.rows.AsyncRowFactory() -.. autoclass:: psycopg.rows.BaseRowFactory() +.. autoclass:: gaussdb.rows.BaseRowFactory() Note that it's easy to implement an object implementing both `!RowFactory` and `!AsyncRowFactory`: usually, everything you need to implement a row factory is -to access the cursor's `~psycopg.Cursor.description`, which is provided by +to access the cursor's `~gaussdb.Cursor.description`, which is provided by both the cursor flavours. diff --git a/docs/api/sql.rst b/docs/api/sql.rst index 5e7000b26..d5722a6ac 100644 --- a/docs/api/sql.rst +++ b/docs/api/sql.rst @@ -4,11 +4,11 @@ .. index:: double: Binding; Client-Side -.. module:: psycopg.sql +.. module:: gaussdb.sql The module contains objects and functions useful to generate SQL dynamically, in a convenient and safe way. SQL identifiers (e.g. names of tables and -fields) cannot be passed to the `~psycopg.Cursor.execute()` method like query +fields) cannot be passed to the `~gaussdb.Cursor.execute()` method like query arguments:: # This will not work @@ -27,9 +27,9 @@ instance:: This sort of works, but it is an accident waiting to happen: the table name may be an invalid SQL literal and need quoting; even more serious is the security problem in case the table name comes from an untrusted source. The -name should be escaped using `~psycopg.pq.Escaping.escape_identifier()`:: +name should be escaped using `~gaussdb.pq.Escaping.escape_identifier()`:: - from psycopg.pq import Escaping + from gaussdb.pq import Escaping # This works, but it is not optimal table_name = 'my_table' @@ -45,11 +45,11 @@ but will eventually crash in the presence of a table or field name with containing characters to escape, or will present a potentially exploitable weakness. -The objects exposed by the `!psycopg.sql` module allow generating SQL +The objects exposed by the `!gaussdb.sql` module allow generating SQL statements on the fly, separating clearly the variable parts of the statement from the query parameters:: - from psycopg import sql + from gaussdb import sql cur.execute( sql.SQL("INSERT INTO {} VALUES (%s, %s)") @@ -64,7 +64,7 @@ Usually you should express the template of your query as an `SQL` instance with ``{}``\-style placeholders and use `~SQL.format()` to merge the variable parts into them, all of which must be `Composable` subclasses. 
You can still have ``%s``\-style placeholders in your query and pass values to -`~psycopg.Cursor.execute()`: such value placeholders will be untouched by +`~gaussdb.Cursor.execute()`: such value placeholders will be untouched by `!format()`:: query = sql.SQL("SELECT {field} FROM {table} WHERE {pkey} = %s").format( @@ -73,8 +73,8 @@ have ``%s``\-style placeholders in your query and pass values to pkey=sql.Identifier('id')) The resulting object is meant to be passed directly to cursor methods such as -`~psycopg.Cursor.execute()`, `~psycopg.Cursor.executemany()`, -`~psycopg.Cursor.copy()`, but can also be used to compose a query as a Python +`~gaussdb.Cursor.execute()`, `~gaussdb.Cursor.executemany()`, +`~gaussdb.Cursor.copy()`, but can also be used to compose a query as a Python string, using the `~Composable.as_string()` method:: cur.execute(query, (42,)) @@ -102,7 +102,7 @@ The `!sql` objects are in the following inheritance hierarchy: | ``|__`` `SQL`: a literal snippet of an SQL query | ``|__`` `Identifier`: a PostgreSQL identifier or dot-separated sequence of identifiers | ``|__`` `Literal`: a value hardcoded into a query -| ``|__`` `Placeholder`: a `%s`\ -style placeholder whose value will be added later e.g. by `~psycopg.Cursor.execute()` +| ``|__`` `Placeholder`: a `%s`\ -style placeholder whose value will be added later e.g. by `~gaussdb.Cursor.execute()` | ``|__`` `Composed`: a sequence of `!Composable` instances. diff --git a/docs/api/types.rst b/docs/api/types.rst index f04659e8c..0f62eb6d1 100644 --- a/docs/api/types.rst +++ b/docs/api/types.rst @@ -1,18 +1,18 @@ -.. currentmodule:: psycopg.types +.. currentmodule:: gaussdb.types -.. _psycopg.types: +.. _gaussdb.types: `!types` -- Types information and adapters ========================================== -.. module:: psycopg.types +.. module:: gaussdb.types -The `!psycopg.types` package exposes: +The `!gaussdb.types` package exposes: - objects to describe PostgreSQL types, such as `TypeInfo`, `TypesRegistry`, to help or :ref:`customise the types conversion `; -- concrete implementations of `~psycopg.abc.Loader` and `~psycopg.abc.Dumper` +- concrete implementations of `~gaussdb.abc.Loader` and `~gaussdb.abc.Dumper` protocols to :ref:`handle builtin data types `; - helper objects to represent PostgreSQL data types which :ref:`don't have a @@ -29,13 +29,13 @@ information, for instance the components of a composite type. You can use `TypeInfo.fetch()` to query information from a database catalog, which is then used by helper functions, such as -`~psycopg.types.hstore.register_hstore()`, to register adapters on types whose +`~gaussdb.types.hstore.register_hstore()`, to register adapters on types whose OID is not known upfront or to create more specialised adapters. -The `!TypeInfo` object doesn't instruct Psycopg to convert a PostgreSQL type -into a Python type: this is the role of a `~psycopg.abc.Loader`. However it +The `!TypeInfo` object doesn't instruct GaussDB to convert a PostgreSQL type +into a Python type: this is the role of a `~gaussdb.abc.Loader`. However it can extend the behaviour of other adapters: if you create a loader for -`!MyType`, using the `TypeInfo` information, Psycopg will be able to manage +`!MyType`, using the `TypeInfo` information, GaussDB will be able to manage seamlessly arrays of `!MyType` or ranges and composite types using `!MyType` as a subtype. @@ -44,8 +44,8 @@ as a subtype. .. 
code:: python - from psycopg.adapt import Loader - from psycopg.types import TypeInfo + from gaussdb.adapt import Loader + from gaussdb.types import TypeInfo t = TypeInfo.fetch(conn, "mytype") t.register(conn) @@ -76,10 +76,10 @@ as a subtype. Query a system catalog to read information about a type. :param conn: the connection to query - :type conn: ~psycopg.Connection or ~psycopg.AsyncConnection + :type conn: ~gaussdb.Connection or ~gaussdb.AsyncConnection :param name: the name of the type to query. It can include a schema name. - :type name: `!str` or `~psycopg.sql.Identifier` + :type name: `!str` or `~gaussdb.sql.Identifier` :return: a `!TypeInfo` object (or subclass) populated with the type information, `!None` if not found. @@ -91,9 +91,9 @@ as a subtype. .. automethod:: register :param context: the context where the type is registered, for instance - a `~psycopg.Connection` or `~psycopg.Cursor`. `!None` registers + a `~gaussdb.Connection` or `~gaussdb.Cursor`. `!None` registers the `!TypeInfo` globally. - :type context: Optional[~psycopg.abc.AdaptContext] + :type context: Optional[~gaussdb.abc.AdaptContext] Registering the `TypeInfo` in a context allows the adapters of that context to look up type information: for instance it allows to @@ -101,35 +101,35 @@ as a subtype. database as a list of the base type. -In order to get information about dynamic PostgreSQL types, Psycopg offers a +In order to get information about dynamic PostgreSQL types, GaussDB offers a few `!TypeInfo` subclasses, whose `!fetch()` method can extract more complete -information about the type, such as `~psycopg.types.composite.CompositeInfo`, -`~psycopg.types.range.RangeInfo`, `~psycopg.types.multirange.MultirangeInfo`, -`~psycopg.types.enum.EnumInfo`. +information about the type, such as `~gaussdb.types.composite.CompositeInfo`, +`~gaussdb.types.range.RangeInfo`, `~gaussdb.types.multirange.MultirangeInfo`, +`~gaussdb.types.enum.EnumInfo`. `!TypeInfo` objects are collected in `TypesRegistry` instances, which help type -information lookup. Every `~psycopg.adapt.AdaptersMap` exposes its type map on -its `~psycopg.adapt.AdaptersMap.types` attribute. +information lookup. Every `~gaussdb.adapt.AdaptersMap` exposes its type map on +its `~gaussdb.adapt.AdaptersMap.types` attribute. .. autoclass:: TypesRegistry `!TypeRegistry` instances are typically exposed by - `~psycopg.adapt.AdaptersMap` objects in adapt contexts such as - `~psycopg.Connection` or `~psycopg.Cursor` (e.g. `!conn.adapters.types`). + `~gaussdb.adapt.AdaptersMap` objects in adapt contexts such as + `~gaussdb.Connection` or `~gaussdb.Cursor` (e.g. `!conn.adapters.types`). The global registry, from which the others inherit from, is available as - `psycopg.adapters`\ `!.types`. + `gaussdb.adapters`\ `!.types`. .. automethod:: __getitem__ .. code:: python - >>> import psycopg + >>> import gaussdb - >>> psycopg.adapters.types["text"] + >>> gaussdb.adapters.types["text"] - >>> psycopg.adapters.types[23] + >>> gaussdb.adapters.types[23] .. automethod:: get @@ -138,7 +138,7 @@ its `~psycopg.adapt.AdaptersMap.types` attribute. .. code:: python - >>> psycopg.adapters.types.get_oid("text[]") + >>> gaussdb.adapters.types.get_oid("text[]") 1009 .. automethod:: get_by_subtype @@ -151,7 +151,7 @@ JSON adapters See :ref:`adapt-json` for details. -.. currentmodule:: psycopg.types.json +.. currentmodule:: gaussdb.types.json .. autoclass:: Json .. 
autoclass:: Jsonb diff --git a/docs/basic/adapt.rst b/docs/basic/adapt.rst index 1aa562c0f..215dfab24 100644 --- a/docs/basic/adapt.rst +++ b/docs/basic/adapt.rst @@ -1,4 +1,4 @@ -.. currentmodule:: psycopg +.. currentmodule:: gaussdb .. index:: single: Adaptation @@ -56,7 +56,7 @@ Numbers adaptation - Python `int` values can be converted to PostgreSQL :sql:`smallint`, :sql:`integer`, :sql:`bigint`, or :sql:`numeric`, according to their numeric - value. Psycopg will choose the smallest data type available, because + value. GaussDB will choose the smallest data type available, because PostgreSQL can automatically cast a type up (e.g. passing a `smallint` where PostgreSQL expect an `integer` is gladly accepted) but will not cast down automatically (e.g. if a function has an :sql:`integer` argument, passing it @@ -104,7 +104,7 @@ such as :sql:`text` and :sql:`varchar` are converted back to Python `!str`: .. code:: python - conn = psycopg.connect() + conn = gaussdb.connect() conn.execute( "INSERT INTO menu (id, entry) VALUES (%s, %s)", (1, "Crème Brûlée at 4.99€")) @@ -272,7 +272,7 @@ represented by the Python `datetime` objects: - dates and timestamps before the year 1, the special value "-infinity"; - the time 24:00:00. -Loading these values will raise a `~psycopg.DataError`. +Loading these values will raise a `~gaussdb.DataError`. If you need to handle these values you can define your own mapping (for instance mapping every value greater than `datetime.date.max` to `!date.max`, @@ -306,7 +306,7 @@ If your server is configured with different settings by default, you can obtain a connection in a supported style using the ``options`` connection parameter; for example:: - >>> conn = psycopg.connect(options="-c datestyle=ISO,YMD") + >>> conn = gaussdb.connect(options="-c datestyle=ISO,YMD") >>> conn.execute("show datestyle").fetchone()[0] # 'ISO, YMD' @@ -320,35 +320,35 @@ DateStyle or IntervalStyle. JSON adaptation --------------- -Psycopg can map between Python objects and PostgreSQL `json/jsonb +GaussDB can map between Python objects and PostgreSQL `json/jsonb types`__, allowing to customise the load and dump function used. .. __: https://www.postgresql.org/docs/current/datatype-json.html Because several Python objects could be considered JSON (dicts, lists, scalars, even date/time if using a dumps function customised to use them), -Psycopg requires you to wrap the object to dump as JSON into a wrapper: -either `psycopg.types.json.Json` or `~psycopg.types.json.Jsonb`. +GaussDB requires you to wrap the object to dump as JSON into a wrapper: +either `gaussdb.types.json.Json` or `~gaussdb.types.json.Jsonb`. .. code:: python - from psycopg.types.json import Jsonb + from gaussdb.types.json import Jsonb thing = {"foo": ["bar", 42]} conn.execute("INSERT INTO mytable VALUES (%s)", [Jsonb(thing)]) -By default Psycopg uses the standard library `json.dumps` and `json.loads` +By default GaussDB uses the standard library `json.dumps` and `json.loads` functions to serialize and de-serialize Python objects to JSON. If you want to customise how serialization happens, for instance changing serialization parameters or using a different JSON library, you can specify your own -functions using the `psycopg.types.json.set_json_dumps()` and -`~psycopg.types.json.set_json_loads()` functions, to apply either globally or +functions using the `gaussdb.types.json.set_json_dumps()` and +`~gaussdb.types.json.set_json_loads()` functions, to apply either globally or to a specific context (connection or cursor). .. 
code:: python from functools import partial - from psycopg.types.json import Jsonb, set_json_dumps, set_json_loads + from gaussdb.types.json import Jsonb, set_json_dumps, set_json_loads import ujson # Use a faster dump function @@ -363,7 +363,7 @@ to a specific context (connection or cursor). If you need an even more specific dump customisation only for certain objects (including different configurations in the same query) you can specify a `!dumps` parameter in the -`~psycopg.types.json.Json`/`~psycopg.types.json.Jsonb` wrapper, which will +`~gaussdb.types.json.Json`/`~gaussdb.types.json.Jsonb` wrapper, which will take precedence over what is specified by `!set_json_dumps()`. .. code:: python @@ -402,7 +402,7 @@ list may contain `!None` elements). >>> conn.execute("SELECT * FROM mytable WHERE id IN %s", [[10,20,30]]) Traceback (most recent call last): File "", line 1, in - psycopg.errors.SyntaxError: syntax error at or near "$1" + gaussdb.errors.SyntaxError: syntax error at or near "$1" LINE 1: SELECT * FROM mytable WHERE id IN $1 ^ @@ -473,7 +473,7 @@ Enum adaptation .. versionadded:: 3.1 -Psycopg can adapt Python `~enum.Enum` subclasses into PostgreSQL enum types +GaussDB can adapt Python `~enum.Enum` subclasses into PostgreSQL enum types (created with the |CREATE TYPE AS ENUM|_ command). .. |CREATE TYPE AS ENUM| replace:: :sql:`CREATE TYPE ... AS ENUM (...)` @@ -510,15 +510,15 @@ and registered enums is different. - The registered PostgreSQL enum is loaded back as the registered Python enum members. -.. autoclass:: psycopg.types.enum.EnumInfo +.. autoclass:: gaussdb.types.enum.EnumInfo - `!EnumInfo` is a subclass of `~psycopg.types.TypeInfo`: refer to the + `!EnumInfo` is a subclass of `~gaussdb.types.TypeInfo`: refer to the latter's documentation for generic usage, especially the - `~psycopg.types.TypeInfo.fetch()` method. + `~gaussdb.types.TypeInfo.fetch()` method. .. attribute:: labels - After `~psycopg.types.TypeInfo.fetch()`, it contains the labels defined + After `~gaussdb.types.TypeInfo.fetch()`, it contains the labels defined in the PostgreSQL enum type. .. attribute:: enum @@ -526,7 +526,7 @@ and registered enums is different. After `register_enum()` is called, it will contain the Python type mapping to the registered enum. -.. autofunction:: psycopg.types.enum.register_enum +.. autofunction:: gaussdb.types.enum.register_enum After registering, fetching data of the registered enum will cast PostgreSQL enum labels into corresponding Python enum members. @@ -537,7 +537,7 @@ and registered enums is different. Example:: >>> from enum import Enum, auto - >>> from psycopg.types.enum import EnumInfo, register_enum + >>> from gaussdb.types.enum import EnumInfo, register_enum >>> class UserRole(Enum): ... ADMIN = auto() diff --git a/docs/basic/copy.rst b/docs/basic/copy.rst index 2bb44985f..8ca27c1c2 100644 --- a/docs/basic/copy.rst +++ b/docs/basic/copy.rst @@ -1,4 +1,4 @@ -.. currentmodule:: psycopg +.. currentmodule:: gaussdb .. index:: pair: COPY; SQL command @@ -8,7 +8,7 @@ Using COPY TO and COPY FROM =========================== -Psycopg allows to operate with `PostgreSQL COPY protocol`__. :sql:`COPY` is +GaussDB allows to operate with `PostgreSQL COPY protocol`__. :sql:`COPY` is one of the most efficient ways to load data into the database (and to modify it, with some SQL creativity). 
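Concretely, loading a batch of Python records looks like the following sketch, assuming an existing ``mytable (id int, data text)`` and an open connection ``conn``:

.. code:: python

    records = [(1, "foo"), (2, "bar")]

    with conn.cursor() as cur:
        # Stream all the rows to the server in a single COPY operation,
        # much faster than one INSERT per record.
        with cur.copy("COPY mytable (id, data) FROM STDIN") as copy:
            for record in records:
                copy.write_row(record)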
@@ -24,7 +24,7 @@ resulting `Copy` object in a `!with` block: # pass data to the 'copy' object using write()/write_row() You can compose a COPY statement dynamically by using objects from the -`psycopg.sql` module: +`gaussdb.sql` module: .. code:: python @@ -155,7 +155,7 @@ necessary if the data is copied :ref:`block-by-block ` using PostgreSQL is particularly finicky when loading data in binary mode and will apply **no cast rules**. This means, for example, that passing the - value 100 to an `integer` column **will fail**, because Psycopg will pass + value 100 to an `integer` column **will fail**, because GaussDB will pass it as a `smallint` value, and the server will reject it because its size doesn't match what expected. @@ -196,7 +196,7 @@ writing to the second. .. code:: python - with psycopg.connect(dsn_src) as conn1, psycopg.connect(dsn_tgt) as conn2: + with gaussdb.connect(dsn_src) as conn1, gaussdb.connect(dsn_tgt) as conn2: with conn1.cursor().copy("COPY src TO STDOUT (FORMAT BINARY)") as copy1: with conn2.cursor().copy("COPY tgt FROM STDIN (FORMAT BINARY)") as copy2: for data in copy1: diff --git a/docs/basic/from_pg2.rst b/docs/basic/from_pg2.rst index 7fb94d92f..4eb3594ca 100644 --- a/docs/basic/from_pg2.rst +++ b/docs/basic/from_pg2.rst @@ -1,22 +1,22 @@ .. index:: - pair: psycopg2; Differences + pair: _GaussDB; Differences -.. currentmodule:: psycopg +.. currentmodule:: gaussdb -.. _from-psycopg2: +.. _from-_GaussDB: -Differences from `!psycopg2` +Differences from `!_GaussDB` ============================ -Psycopg 3 uses the common DBAPI structure of many other database adapters and -tries to behave as close as possible to `!psycopg2`. There are however a few +gaussdb uses the common DBAPI structure of many other database adapters and +tries to behave as close as possible to `!_GaussDB`. There are however a few differences to be aware of. .. tip:: Most of the times, the workarounds suggested here will work with both - Psycopg 2 and 3, which could be useful if you are porting a program or - writing a program that should work with both Psycopg 2 and 3. + GaussDB 2 and 3, which could be useful if you are porting a program or + writing a program that should work with both GaussDB 2 and 3. .. _server-side-binding: @@ -24,7 +24,7 @@ differences to be aware of. Server-side binding ------------------- -Psycopg 3 sends the query and the parameters to the server separately, instead +gaussdb sends the query and the parameters to the server separately, instead of merging them on the client side. Server-side binding works for normal :sql:`SELECT` and data manipulation statements (:sql:`INSERT`, :sql:`UPDATE`, :sql:`DELETE`), but it doesn't work with many other statements. For instance, @@ -33,14 +33,14 @@ it doesn't work with :sql:`SET` or with :sql:`NOTIFY`:: >>> conn.execute("SET TimeZone TO %s", ["UTC"]) Traceback (most recent call last): ... - psycopg.errors.SyntaxError: syntax error at or near "$1" + gaussdb.errors.SyntaxError: syntax error at or near "$1" LINE 1: SET TimeZone TO $1 ^ >>> conn.execute("NOTIFY %s, %s", ["chan", 42]) Traceback (most recent call last): ... - psycopg.errors.SyntaxError: syntax error at or near "$1" + gaussdb.errors.SyntaxError: syntax error at or near "$1" LINE 1: NOTIFY $1, $2 ^ @@ -49,7 +49,7 @@ and with any data definition statement:: >>> conn.execute("CREATE TABLE foo (id int DEFAULT %s)", [42]) Traceback (most recent call last): ... 
- psycopg.errors.UndefinedParameter: there is no parameter $1 + gaussdb.errors.UndefinedParameter: there is no parameter $1 LINE 1: CREATE TABLE foo (id int DEFAULT $1) ^ @@ -68,9 +68,9 @@ function can be used instead of :sql:`NOTIFY`:: #id-1.9.3.157.7.5 If this is not possible, you must merge the query and the parameter on the -client side. You can do so using the `psycopg.sql` objects:: +client side. You can do so using the `gaussdb.sql` objects:: - >>> from psycopg import sql + >>> from gaussdb import sql >>> cur.execute(sql.SQL("CREATE TABLE foo (id int DEFAULT {})").format(42)) @@ -81,15 +81,15 @@ such as `ClientCursor`:: >>> cur.execute("CREATE TABLE foo (id int DEFAULT %s)", [42]) If you need `!ClientCursor` often, you can set the `Connection.cursor_factory` -to have them created by default by `Connection.cursor()`. This way, Psycopg 3 -will behave largely the same way of Psycopg 2. +to have them created by default by `Connection.cursor()`. This way, gaussdb +will behave largely the same way of GaussDB 2. Note that, both server-side and client-side, you can only specify **values** as parameters (i.e. *the strings that go in single quotes*). If you need to parametrize different parts of a statement (such as a table name), you must -use the `psycopg.sql` module:: +use the `gaussdb.sql` module:: - >>> from psycopg import sql + >>> from gaussdb import sql # This will quote the user and the password using the right quotes # e.g.: ALTER USER "foo" SET PASSWORD 'bar' @@ -107,7 +107,7 @@ use the `psycopg.sql` module:: Extended query Protocol ----------------------- -In order to use :ref:`server-side-binding`, psycopg normally uses the +In order to use :ref:`server-side-binding`, gaussdb normally uses the `extended query protocol`__ to communicate with the backend. In certain context outside pure PostgreSQL, the extended query protocol is not @@ -133,23 +133,23 @@ the same `!execute()` call, separating them by semicolon:: ... (10, 20)) Traceback (most recent call last): ... - psycopg.errors.SyntaxError: cannot insert multiple commands into a prepared statement + gaussdb.errors.SyntaxError: cannot insert multiple commands into a prepared statement One obvious way to work around the problem is to use several `!execute()` calls. **There is no such limitation if no parameters are used**. As a consequence, you can compose a multiple query on the client side and run them all in the same -`!execute()` call, using the `psycopg.sql` objects:: +`!execute()` call, using the `gaussdb.sql` objects:: - >>> from psycopg import sql + >>> from gaussdb import sql >>> conn.execute( ... sql.SQL("INSERT INTO foo VALUES ({}); INSERT INTO foo values ({})" ... .format(10, 20)) or a :ref:`client-side binding cursor `:: - >>> cur = psycopg.ClientCursor(conn) + >>> cur = gaussdb.ClientCursor(conn) >>> cur.execute( ... "INSERT INTO foo VALUES (%s); INSERT INTO foo VALUES (%s)", ... (10, 20)) @@ -164,15 +164,15 @@ or a :ref:`client-side binding cursor `:: >>> conn.execute("CREATE DATABASE foo; SELECT 1") Traceback (most recent call last): ... - psycopg.errors.ActiveSqlTransaction: CREATE DATABASE cannot run inside a transaction block + gaussdb.errors.ActiveSqlTransaction: CREATE DATABASE cannot run inside a transaction block This happens because PostgreSQL itself will wrap multiple statements in a transaction. Note that you will experience a different behaviour in :program:`psql` (:program:`psql` will split the queries on semicolons and send them to the server separately). 
- This is not new in Psycopg 3: the same limitation is present in - `!psycopg2` too. + This is not new in gaussdb: the same limitation is present in + `!_GaussDB` too. .. _multi-results: @@ -180,14 +180,14 @@ or a :ref:`client-side binding cursor `:: Multiple results returned from multiple statements -------------------------------------------------- -If more than one statement returning results is executed in psycopg2, only the +If more than one statement returning results is executed in _GaussDB, only the result of the last statement is returned:: >>> cur_pg2.execute("SELECT 1; SELECT 2") >>> cur_pg2.fetchone() (2,) -In Psycopg 3 instead, all the results are available. After running the query, +In gaussdb instead, all the results are available. After running the query, the first result will be readily available in the cursor and can be consumed using the usual `!fetch*()` methods. In order to access the following results, you can use the `Cursor.nextset()` method:: @@ -220,7 +220,7 @@ find a function candidate for the given data types:: >>> conn.execute("SELECT json_build_array(%s, %s)", ["foo", "bar"]) Traceback (most recent call last): ... - psycopg.errors.IndeterminateDatatype: could not determine data type of parameter $1 + gaussdb.errors.IndeterminateDatatype: could not determine data type of parameter $1 This can be worked around specifying the argument types explicitly via a cast:: @@ -233,12 +233,12 @@ You cannot use ``IN %s`` with a tuple ------------------------------------- ``IN`` cannot be used with a tuple as single parameter, as was possible with -``psycopg2``:: +``_GaussDB``:: >>> conn.execute("SELECT * FROM foo WHERE id IN %s", [(10,20,30)]) Traceback (most recent call last): ... - psycopg.errors.SyntaxError: syntax error at or near "$1" + gaussdb.errors.SyntaxError: syntax error at or near "$1" LINE 1: SELECT * FROM foo WHERE id IN $1 ^ @@ -248,7 +248,7 @@ array:: >>> conn.execute("SELECT * FROM foo WHERE id = ANY(%s)", [[10,20,30]]) -Note that `ANY()` can be used with `!psycopg2` too, and has the advantage of +Note that `ANY()` can be used with `!_GaussDB` too, and has the advantage of accepting an empty list of values too as argument, which is not supported by the :sql:`IN` operator instead. @@ -266,7 +266,7 @@ You cannot use :sql:`IS %s` or :sql:`IS NOT %s`:: >>> conn.execute("SELECT * FROM foo WHERE field IS %s", [None]) Traceback (most recent call last): ... - psycopg.errors.SyntaxError: syntax error at or near "$1" + gaussdb.errors.SyntaxError: syntax error at or near "$1" LINE 1: SELECT * FROM foo WHERE field IS $1 ^ @@ -299,26 +299,26 @@ Analogously you can use :sql:`IS DISTINCT FROM %s` as a parametric version of Cursors subclasses ------------------ -In `!psycopg2`, a few cursor subclasses allowed to return data in different -form than tuples. In Psycopg 3 the same can be achieved by setting a :ref:`row +In `!_GaussDB`, a few cursor subclasses allowed to return data in different +form than tuples. In gaussdb the same can be achieved by setting a :ref:`row factory `: -- instead of `~psycopg2.extras.RealDictCursor` you can use - `~psycopg.rows.dict_row`; +- instead of `~_GaussDB.extras.RealDictCursor` you can use + `~gaussdb.rows.dict_row`; -- instead of `~psycopg2.extras.NamedTupleCursor` you can use - `~psycopg.rows.namedtuple_row`. +- instead of `~_GaussDB.extras.NamedTupleCursor` you can use + `~gaussdb.rows.namedtuple_row`. -Other row factories are available in the `psycopg.rows` module. 
There isn't an -object behaving like `~psycopg2.extras.DictCursor` (whose results are +Other row factories are available in the `gaussdb.rows` module. There isn't an +object behaving like `~_GaussDB.extras.DictCursor` (whose results are indexable both by column position and by column name). .. code:: - from psycopg.rows import dict_row, namedtuple_row + from gaussdb.rows import dict_row, namedtuple_row # By default, every cursor will return dicts. - conn = psycopg.connect(DSN, row_factory=dict_row) + conn = gaussdb.connect(DSN, row_factory=dict_row) # You can set a row factory on a single cursor too. cur = conn.cursor(row_factory=namedtuple_row) @@ -349,12 +349,12 @@ adaptation system `. Copy is no longer file-based ---------------------------- -`!psycopg2` exposes :ref:`a few copy methods ` to interact with +`!_GaussDB` exposes :ref:`a few copy methods ` to interact with PostgreSQL :sql:`COPY`. Their file-based interface doesn't make it easy to load dynamically-generated data into a database. There is now a single `~Cursor.copy()` method, which is similar to -`!psycopg2` `!copy_expert()` in accepting a free-form :sql:`COPY` command and +`!_GaussDB` `!copy_expert()` in accepting a free-form :sql:`COPY` command and returns an object to read/write data, block-wise or record-wise. The different usage pattern also enables :sql:`COPY` to be used in async interactions. @@ -366,12 +366,12 @@ usage pattern also enables :sql:`COPY` to be used in async interactions. `!with` connection ------------------ -In `!psycopg2`, using the syntax :ref:`with connection `, +In `!_GaussDB`, using the syntax :ref:`with connection `, only the transaction is closed, not the connection. This behaviour is surprising for people used to several other Python classes wrapping resources, such as files. -In Psycopg 3, using :ref:`with connection ` will close the +In gaussdb, using :ref:`with connection ` will close the connection at the end of the `!with` block, making handling the connection resources more familiar. @@ -398,7 +398,7 @@ function_name(...)` or :sql:`CALL procedure_name(...)` instead. `!client_encoding` is gone -------------------------- -Psycopg automatically uses the database client encoding to decode data to +GaussDB automatically uses the database client encoding to decode data to Unicode strings. Use `ConnectionInfo.encoding` if you need to read the encoding. You can select an encoding at connection time using the `!client_encoding` connection parameter and you can change the encoding of a @@ -437,25 +437,25 @@ Python. While Python dates are limited to the years between 1 and 9999 10K. Furthermore PostgreSQL can also represent symbolic dates "infinity", in both directions. -In psycopg2, by default, `infinity dates and timestamps map to 'date.max'`__ +In _GaussDB, by default, `infinity dates and timestamps map to 'date.max'`__ and similar constants. This has the problem of creating a non-bijective mapping (two Postgres dates, infinity and 9999-12-31, both map to the same Python date). There is also the perversity that valid Postgres dates, greater than Python `!date.max` but arguably lesser than infinity, will still overflow. -In Psycopg 3, every date greater than year 9999 will overflow, including +In gaussdb, every date greater than year 9999 will overflow, including infinity. If you would like to customize this mapping (for instance flattening every date past Y10K on `!date.max`) you can subclass and adapt the appropriate loaders: take a look at :ref:`this example ` to see how. -.. 
__: https://www.psycopg.org/docs/usage.html#infinite-dates-handling +.. __: https://www.gaussdb.org/docs/usage.html#infinite-dates-handling .. _whats-new: -What's new in Psycopg 3 +What's new in gaussdb ----------------------- - :ref:`Asynchronous support ` @@ -465,4 +465,4 @@ What's new in gaussdb - :ref:`Python-based COPY support ` - :ref:`Support for static typing ` - :ref:`A redesigned connection pool ` -- :ref:`Direct access to the libpq functionalities ` +- :ref:`Direct access to the libpq functionalities `
diff --git a/docs/basic/index.rst b/docs/basic/index.rst index bf9e27da4..d4a4b9ed2 100644 --- a/docs/basic/index.rst +++ b/docs/basic/index.rst @@ -1,16 +1,16 @@ .. _basic: -Getting started with Psycopg 3 +Getting started with gaussdb ============================== -This section of the documentation will explain :ref:`how to install Psycopg +This section of the documentation will explain :ref:`how to install GaussDB ` and how to perform normal activities such as :ref:`querying the database ` or :ref:`loading data using COPY `. .. important:: - If you are familiar with psycopg2 please take a look at - :ref:`from-psycopg2` to see what is changed. + If you are familiar with _GaussDB please take a look at + :ref:`from-_GaussDB` to see what is changed. .. toctree:: :maxdepth: 2
diff --git a/docs/basic/install.rst b/docs/basic/install.rst index 084b7b9b7..0f793695d 100644 --- a/docs/basic/install.rst +++ b/docs/basic/install.rst @@ -6,13 +6,13 @@ Installation In short, if you use a :ref:`supported system`:: pip install --upgrade pip # upgrade pip to at least 20.3 - pip install "psycopg[binary]" # remove [binary] for PyPy + pip install "gaussdb[binary]" # remove [binary] for PyPy and you should be :ref:`ready to start `. Read further for alternative ways to install. .. note:: - Fun fact: there is no ``psycopg3`` package, only ``psycopg``! + Fun fact: there is no ``gaussdb3`` package, only ``gaussdb``! .. _supported-systems: Supported systems ----------------- -The Psycopg version documented here has *official and tested* support for: +The GaussDB version documented here has *official and tested* support for: - Python: from version 3.9 to 3.13 - - Python 3.8 supported before Psycopg 3.3 - - Python 3.7 supported before Psycopg 3.2 - - Python 3.6 supported before Psycopg 3.1 + - Python 3.8 supported before gaussdb 3.3 + - Python 3.7 supported before gaussdb 3.2 + - Python 3.6 supported before gaussdb 3.1 - PyPy: from version 3.9 to 3.10 @@ -44,7 +44,7 @@ The Psycopg version documented here has *official and tested* support for: The tests to verify the supported systems run in `Github workflows`__: anything that is not tested there is not officially supported. This includes: -.. __: https://github.com/psycopg/psycopg/actions +.. __: https://github.com/gaussdb/gaussdb/actions - Unofficial Python distributions such as Conda; - Alternative PostgreSQL implementation; @@ -60,10 +60,10 @@ the correct working or a smooth ride. Binary installation ------------------- -The quickest way to start developing with Psycopg 3 is to install the binary +The quickest way to start developing with gaussdb is to install the binary packages by running:: - pip install "psycopg[binary]" + pip install "gaussdb[binary]" This will install a self-contained package with all the libraries needed. **You will need pip 20.3 at least**: please run ``pip install --upgrade pip`` to update it beforehand. .. seealso:: - Did Psycopg 3 install ok? Great!
You can now move on to the :ref:`basic + Did gaussdb install ok? Great! You can now move on to the :ref:`basic module usage ` to learn how it works. Keep on reading if the above method didn't work and you need a different - way to install Psycopg 3. + way to install gaussdb. For further information about the differences between the packages see :ref:`pq-impl`. @@ -92,20 +92,20 @@ you should proceed to a :ref:`local installation ` or a - binary packages for a new version of Python are made available once the runners used for the build support it. You can check the - `psycopg-binary PyPI files`__ to verify whether your platform is + `gaussdb-binary PyPI files`__ to verify whether your platform is supported; - the libpq version included in the binary packages depends on the version - available on the runners. You can use the `psycopg.pq.version()` - function and `~psycopg.pq.__build_version__` constant to infer the + available on the runners. You can use the `gaussdb.pq.version()` + function and `~gaussdb.pq.__build_version__` constant to infer the features available. - .. __: https://pypi.org/project/psycopg-binary/#files + .. __: https://pypi.org/project/gaussdb-binary/#files .. warning:: - - Starting from Psycopg 3.1.20, ARM64 macOS binary packages (i.e. for + - Starting from gaussdb 3.1.20, ARM64 macOS binary packages (i.e. for Apple M1 machines) are no more available for macOS versions before 14.0. Please upgrade your OS to at least 14.0 or use a :ref:`local ` or a :ref:`Python ` @@ -123,8 +123,8 @@ Local installation A "Local installation" results in a performing and maintainable library. The library will include the speed-up C module and will be linked to the system libraries (``libpq``, ``libssl``...) so that system upgrade of libraries will -upgrade the libraries used by Psycopg 3 too. This is the preferred way to -install Psycopg for a production site. +upgrade the libraries used by gaussdb too. This is the preferred way to +install GaussDB for a production site. In order to perform a local installation you need some prerequisites: @@ -139,7 +139,7 @@ try this and follow the `binary installation`_ instead. If your build prerequisites are in place you can run:: - pip install "psycopg[c]" + pip install "gaussdb[c]" .. warning:: @@ -153,7 +153,7 @@ Pure Python installation If you simply install:: - pip install psycopg + pip install gaussdb without ``[c]`` or ``[binary]`` extras you will obtain a pure Python implementation. This is particularly handy to debug and hack, but it still @@ -182,12 +182,12 @@ installation`_. Installing the connection pool ------------------------------ -The :ref:`Psycopg connection pools ` are distributed in a -separate package from the `!psycopg` package itself, in order to allow a +The :ref:`GaussDB connection pools ` are distributed in a +separate package from the `!gaussdb` package itself, in order to allow a different release cycle. In order to use the pool you must install the ``pool`` extra, using ``pip -install "psycopg[pool]"``, or install the `psycopg_pool` package separately, +install "gaussdb[pool]"``, or install the `gaussdb_pool` package separately, which would allow to specify the release to install more precisely. @@ -198,21 +198,21 @@ If you need to specify your project dependencies (for instance in a ``requirements.txt`` file, ``setup.py``, ``pyproject.toml`` dependencies...) you should probably specify one of the following: -- If your project is a library, add a dependency on ``psycopg``.
This will - make sure that your library will have the ``psycopg`` package with the right +- If your project is a library, add a dependency on ``gaussdb``. This will + make sure that your library will have the ``gaussdb`` package with the right interface and leaves the possibility of choosing a specific implementation to the end user of your library. - If your project is a final application (e.g. a service running on a server) - you can require a specific implementation, for instance ``psycopg[c]``, + you can require a specific implementation, for instance ``gaussdb[c]``, after you have made sure that the prerequisites are met (e.g. the depending libraries and tools are installed in the host machine). -In both cases you can specify which version of Psycopg to use using +In both cases you can specify which version of GaussDB to use using `requirement specifiers`__. .. __: https://pip.pypa.io/en/stable/reference/requirement-specifiers/ If you want to make sure that a specific implementation is used you can -specify the :envvar:`PSYCOPG_IMPL` environment variable: importing the library +specify the :envvar:`GAUSSDB_IMPL` environment variable: importing the library will fail if the implementation specified is not available. See :ref:`pq-impl`. diff --git a/docs/basic/params.rst b/docs/basic/params.rst index a733f0739..8eaf6d3af 100644 --- a/docs/basic/params.rst +++ b/docs/basic/params.rst @@ -1,4 +1,4 @@ -.. currentmodule:: psycopg +.. currentmodule:: gaussdb .. index:: pair: Query; Parameters @@ -100,7 +100,7 @@ query. - The variables placeholder *must always be a* ``%s``, even if a different placeholder (such as a ``%d`` for integers or ``%f`` for floats) may look more appropriate for the type. You may find other placeholders used in - Psycopg queries (``%b`` and ``%t``) but they are not related to the + GaussDB queries (``%b`` and ``%t``) but they are not related to the type of the argument: see :ref:`binary-data` if you want to read more:: cur.execute("INSERT INTO numbers VALUES (%d)", (10,)) # WRONG @@ -109,7 +109,7 @@ query. - Only query values should be bound via this method: it shouldn't be used to merge table or field names to the query. If you need to generate SQL queries dynamically (for instance choosing a table name at runtime) you can use the - functionalities provided in the `psycopg.sql` module:: + functionalities provided in the `gaussdb.sql` module:: cur.execute("INSERT INTO %s VALUES (%s)", ('numbers', 10)) # WRONG cur.execute( # correct @@ -150,7 +150,7 @@ as a memo and hang it onto your desk. .. _SQL injection: https://en.wikipedia.org/wiki/SQL_injection .. __: https://xkcd.com/327/ -Psycopg can :ref:`automatically convert Python objects to SQL +GaussDB can :ref:`automatically convert Python objects to SQL values`: using this feature your code will be more robust and reliable. We must stress this point: @@ -188,7 +188,7 @@ argument of the `Cursor.execute()` method:: .. seealso:: Now that you know how to pass parameters to queries, you can take a look - at :ref:`how Psycopg converts data types `. + at :ref:`how GaussDB converts data types `. .. index:: @@ -200,11 +200,11 @@ Binary parameters and results ----------------------------- PostgreSQL has two different ways to transmit data between client and server: -`~psycopg.pq.Format.TEXT`, always available, and `~psycopg.pq.Format.BINARY`, +`~gaussdb.pq.Format.TEXT`, always available, and `~gaussdb.pq.Format.BINARY`, available most of the times but not always. Usually the binary format is more efficient to use. 
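As a quick preview of the placeholders described below, the format can be forced per parameter. A sketch, assuming a hypothetical ``images`` table with a :sql:`text` and a :sql:`bytea` column and an open cursor ``cur``:

.. code:: python

    # %t forces the text format, %b the binary format, for a single parameter.
    cur.execute(
        "INSERT INTO images (name, data) VALUES (%t, %b)",
        ("logo", b"\x89PNG\r\n..."),
    )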
-Psycopg can support both formats for each data type. Whenever a value +GaussDB can support both formats for each data type. Whenever a value is passed to a query using the normal ``%s`` placeholder, the best format available is chosen (often, but not always, the binary format is picked as the best choice). @@ -212,12 +212,12 @@ best choice). If you have a reason to select explicitly the binary format or the text format for a value you can use respectively a ``%b`` placeholder or a ``%t`` placeholder instead of the normal ``%s``. `~Cursor.execute()` will fail if a -`~psycopg.adapt.Dumper` for the right data type and format is not available. +`~gaussdb.adapt.Dumper` for the right data type and format is not available. The same two formats, text or binary, are used by PostgreSQL to return data from a query to the client. Unlike with parameters, where you can choose the format value-by-value, all the columns returned by a query will have the same -format. Every type returned by the query should have a `~psycopg.adapt.Loader` +format. Every type returned by the query should have a `~gaussdb.adapt.Loader` configured, otherwise the data will be returned as unparsed `!str` (for text results) or buffer (for binary results). diff --git a/docs/basic/pgtypes.rst b/docs/basic/pgtypes.rst index 14ee5bee3..2ed1466ba 100644 --- a/docs/basic/pgtypes.rst +++ b/docs/basic/pgtypes.rst @@ -1,4 +1,4 @@ -.. currentmodule:: psycopg +.. currentmodule:: gaussdb .. index:: single: Adaptation @@ -11,7 +11,7 @@ Adapting other PostgreSQL types =============================== PostgreSQL offers other data types which don't map to native Python types. -Psycopg offers wrappers and conversion functions to allow their use. +GaussDB offers wrappers and conversion functions to allow their use. .. index:: @@ -24,7 +24,7 @@ Psycopg offers wrappers and conversion functions to allow their use. Composite types casting ----------------------- -Psycopg can adapt PostgreSQL composite types (either created with the |CREATE +GaussDB can adapt PostgreSQL composite types (either created with the |CREATE TYPE|_ command or implicitly defined after a table row type) to and from Python tuples, `~collections.namedtuple`, or any other suitable object configured. @@ -33,21 +33,21 @@ configured. .. _CREATE TYPE: https://www.postgresql.org/docs/current/static/sql-createtype.html Before using a composite type it is necessary to get information about it -using the `~psycopg.types.composite.CompositeInfo` class and to register it -using `~psycopg.types.composite.register_composite()`. +using the `~gaussdb.types.composite.CompositeInfo` class and to register it +using `~gaussdb.types.composite.register_composite()`. -.. autoclass:: psycopg.types.composite.CompositeInfo +.. autoclass:: gaussdb.types.composite.CompositeInfo - `!CompositeInfo` is a `~psycopg.types.TypeInfo` subclass: check its + `!CompositeInfo` is a `~gaussdb.types.TypeInfo` subclass: check its documentation for the generic usage, especially the - `~psycopg.types.TypeInfo.fetch()` method. + `~gaussdb.types.TypeInfo.fetch()` method. .. attribute:: python_type After `register_composite()` is called, it will contain the python type mapping to the registered composite. -.. autofunction:: psycopg.types.composite.register_composite +.. autofunction:: gaussdb.types.composite.register_composite After registering, fetching data of the registered composite will invoke `!factory` to create corresponding Python objects. @@ -61,7 +61,7 @@ using `~psycopg.types.composite.register_composite()`. 
Example:: - >>> from psycopg.types.composite import CompositeInfo, register_composite + >>> from gaussdb.types.composite import CompositeInfo, register_composite >>> conn.execute("CREATE TYPE card AS (value int, suit text)") @@ -108,11 +108,11 @@ definition of custom ones. .. __: https://www.postgresql.org/docs/current/rangetypes.html -All the PostgreSQL range types are loaded as the `~psycopg.types.range.Range` +All the PostgreSQL range types are loaded as the `~gaussdb.types.range.Range` Python type, which is a `~typing.Generic` type and can hold bounds of different types. -.. autoclass:: psycopg.types.range.Range +.. autoclass:: gaussdb.types.range.Range This Python type is only used to pass and retrieve range values to and from PostgreSQL and doesn't attempt to replicate the PostgreSQL range @@ -144,21 +144,21 @@ The built-in range objects are adapted automatically: if a `!Range` objects contains `~datetime.date` bounds, it is dumped using the :sql:`daterange` OID, and of course :sql:`daterange` values are loaded back as `!Range[date]`. -If you create your own range type you can use `~psycopg.types.range.RangeInfo` -and `~psycopg.types.range.register_range()` to associate the range type with +If you create your own range type you can use `~gaussdb.types.range.RangeInfo` +and `~gaussdb.types.range.register_range()` to associate the range type with its subtype and make it work like the builtin ones. -.. autoclass:: psycopg.types.range.RangeInfo +.. autoclass:: gaussdb.types.range.RangeInfo - `!RangeInfo` is a `~psycopg.types.TypeInfo` subclass: check its + `!RangeInfo` is a `~gaussdb.types.TypeInfo` subclass: check its documentation for generic details, especially the - `~psycopg.types.TypeInfo.fetch()` method. + `~gaussdb.types.TypeInfo.fetch()` method. -.. autofunction:: psycopg.types.range.register_range +.. autofunction:: gaussdb.types.range.register_range Example:: - >>> from psycopg.types.range import Range, RangeInfo, register_range + >>> from gaussdb.types.range import Range, RangeInfo, register_range >>> conn.execute("CREATE TYPE strrange AS RANGE (SUBTYPE = text)") >>> info = RangeInfo.fetch(conn, "strrange") @@ -186,10 +186,10 @@ automatically available for every range, built-in and user-defined. .. __: https://www.postgresql.org/docs/current/rangetypes.html All the PostgreSQL range types are loaded as the -`~psycopg.types.multirange.Multirange` Python type, which is a mutable -sequence of `~psycopg.types.range.Range` elements. +`~gaussdb.types.multirange.Multirange` Python type, which is a mutable +sequence of `~gaussdb.types.range.Range` elements. -.. autoclass:: psycopg.types.multirange.Multirange +.. autoclass:: gaussdb.types.multirange.Multirange This Python type is only used to pass and retrieve multirange values to and from PostgreSQL and doesn't attempt to replicate the PostgreSQL @@ -209,29 +209,29 @@ sequence of `~psycopg.types.range.Range` elements. can declare a variable to be `!Multirange[date]` and mypy will complain if you try to add it a `Range[Decimal]`. -Like for `~psycopg.types.range.Range`, built-in multirange objects are adapted +Like for `~gaussdb.types.range.Range`, built-in multirange objects are adapted automatically: if a `!Multirange` object contains `!Range` with `~datetime.date` bounds, it is dumped using the :sql:`datemultirange` OID, and :sql:`datemultirange` values are loaded back as `!Multirange[date]`. 
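For instance, a `!Range` with `~datetime.date` bounds can be passed as a plain query parameter. A sketch, assuming a hypothetical ``bookings`` table with a :sql:`daterange` column and an open cursor ``cur``:

.. code:: python

    from datetime import date

    from gaussdb.types.range import Range

    # Dumped with the daterange OID because the bounds are dates.
    period = Range(date(2024, 1, 1), date(2024, 1, 7))
    cur.execute("INSERT INTO bookings (period) VALUES (%s)", [period])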
If you have created your own range type you can use -`~psycopg.types.multirange.MultirangeInfo` and -`~psycopg.types.multirange.register_multirange()` to associate the resulting +`~gaussdb.types.multirange.MultirangeInfo` and +`~gaussdb.types.multirange.register_multirange()` to associate the resulting multirange type with its subtype and make it work like the builtin ones. -.. autoclass:: psycopg.types.multirange.MultirangeInfo +.. autoclass:: gaussdb.types.multirange.MultirangeInfo - `!MultirangeInfo` is a `~psycopg.types.TypeInfo` subclass: check its + `!MultirangeInfo` is a `~gaussdb.types.TypeInfo` subclass: check its documentation for generic details, especially the - `~psycopg.types.TypeInfo.fetch()` method. + `~gaussdb.types.TypeInfo.fetch()` method. -.. autofunction:: psycopg.types.multirange.register_multirange +.. autofunction:: gaussdb.types.multirange.register_multirange Example:: - >>> from psycopg.types.multirange import \ + >>> from gaussdb.types.multirange import \ ... Multirange, MultirangeInfo, register_multirange - >>> from psycopg.types.range import Range + >>> from gaussdb.types.range import Range >>> conn.execute("CREATE TYPE strrange AS RANGE (SUBTYPE = text)") >>> info = MultirangeInfo.fetch(conn, "strmultirange") @@ -263,7 +263,7 @@ well as regular BTree indexes for equality, uniqueness etc. .. |hstore| replace:: :sql:`hstore` .. _hstore: https://www.postgresql.org/docs/current/static/hstore.html -Psycopg can convert Python `!dict` objects to and from |hstore| structures. +GaussDB can convert Python `!dict` objects to and from |hstore| structures. Only dictionaries with string keys and values are supported. `!None` is also allowed as value but not as a key. @@ -276,18 +276,18 @@ database using: Because |hstore| is distributed as a contrib module, its oid is not well known, so it is necessary to use `!TypeInfo`\.\ -`~psycopg.types.TypeInfo.fetch()` to query the database and get its oid. The +`~gaussdb.types.TypeInfo.fetch()` to query the database and get its oid. The resulting object can be passed to -`~psycopg.types.hstore.register_hstore()` to configure dumping `!dict` to +`~gaussdb.types.hstore.register_hstore()` to configure dumping `!dict` to |hstore| and parsing |hstore| back to `!dict`, in the context where the adapter is registered. -.. autofunction:: psycopg.types.hstore.register_hstore +.. autofunction:: gaussdb.types.hstore.register_hstore Example:: - >>> from psycopg.types import TypeInfo - >>> from psycopg.types.hstore import register_hstore + >>> from gaussdb.types import TypeInfo + >>> from gaussdb.types.hstore import register_hstore >>> info = TypeInfo.fetch(conn, "hstore") >>> register_hstore(info, conn) @@ -314,7 +314,7 @@ you may want to store such instances in the database and have the conversion happen automatically. .. warning:: - Psycopg doesn't have a dependency on the ``shapely`` package: you should + GaussDB doesn't have a dependency on the ``shapely`` package: you should install the library as an additional dependency of your project. .. warning:: @@ -328,13 +328,13 @@ happen automatically. Since PostgGIS is an extension, the :sql:`geometry` type oid is not well known, so it is necessary to use `!TypeInfo`\.\ -`~psycopg.types.TypeInfo.fetch()` to query the database and find it. The -resulting object can be passed to `~psycopg.types.shapely.register_shapely()` +`~gaussdb.types.TypeInfo.fetch()` to query the database and find it. 
The +resulting object can be passed to `~gaussdb.types.shapely.register_shapely()` to configure dumping `shape`_ instances to :sql:`geometry` columns and parsing :sql:`geometry` data back to `!shape` instances, in the context where the adapters are registered. -.. function:: psycopg.types.shapely.register_shapely +.. function:: gaussdb.types.shapely.register_shapely Register Shapely dumper and loaders. @@ -359,8 +359,8 @@ adapters are registered. Example:: - >>> from psycopg.types import TypeInfo - >>> from psycopg.types.shapely import register_shapely + >>> from gaussdb.types import TypeInfo + >>> from gaussdb.types.shapely import register_shapely >>> from shapely.geometry import Point >>> info = TypeInfo.fetch(conn, "geometry") @@ -379,7 +379,7 @@ Example:: Notice that, if the geometry adapters are registered on a specific object (a connection or cursor), other connections and cursors will be unaffected:: - >>> conn2 = psycopg.connect(CONN_STR) + >>> conn2 = gaussdb.connect(CONN_STR) >>> conn2.execute(""" ... SELECT ST_GeomFromGeoJSON('{ ... "type":"Point", diff --git a/docs/basic/transactions.rst b/docs/basic/transactions.rst index abcecc9d2..afd8e932f 100644 --- a/docs/basic/transactions.rst +++ b/docs/basic/transactions.rst @@ -1,4 +1,4 @@ -.. currentmodule:: psycopg +.. currentmodule:: gaussdb .. index:: Transactions management .. index:: InFailedSqlTransaction @@ -9,7 +9,7 @@ Transactions management ======================= -Psycopg has a behaviour that may seem surprising compared to +GaussDB has a behaviour that may seem surprising compared to :program:`psql`: by default, any database operation will start a new transaction. As a consequence, changes made by any cursor of the connection will not be visible until `Connection.commit()` is called, and will be @@ -25,12 +25,12 @@ as PgBouncer) will also discard a connection left in transaction state, so, if possible you will want to commit or rollback a connection before finishing working with it. -An example of what will happen, the first time you will use Psycopg (and to be +An example of what will happen, the first time you will use GaussDB (and to be disappointed by it), is likely: .. code:: python - conn = psycopg.connect() + conn = gaussdb.connect() # Creating a cursor doesn't start a transaction or affect the connection # in any way. @@ -56,7 +56,7 @@ There are a few things going wrong here, let's see how they can be improved. One obvious problem after the run above is that, firing up :program:`psql`, you will see no new record in the table ``data``. One way to fix the problem is to call `!conn.commit()` before closing the connection. Thankfully, if you -use the :ref:`connection context `, Psycopg will commit the +use the :ref:`connection context `, GaussDB will commit the connection at the end of the block (or roll it back if the block is exited with an exception): @@ -66,7 +66,7 @@ sequence of database statements: .. code-block:: python :emphasize-lines: 1 - with psycopg.connect() as conn: + with gaussdb.connect() as conn: cur = conn.cursor() @@ -113,7 +113,7 @@ Autocommit transactions ----------------------- The manual commit requirement can be suspended using `~Connection.autocommit`, -either as connection attribute or as `~psycopg.Connection.connect()` +either as connection attribute or as `~gaussdb.Connection.connect()` parameter. This may be required to run operations that cannot be executed inside a transaction, such as :sql:`CREATE DATABASE`, :sql:`VACUUM`, :sql:`CALL` on `stored procedures`__ using transaction control. 
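For instance, a sketch of running :sql:`VACUUM`, which refuses to run inside a transaction block (assuming a reachable ``dbname=test`` database containing a ``mytable`` table):

.. code:: python

    import gaussdb

    # autocommit=True suppresses the implicit transaction, so commands such
    # as VACUUM can be executed directly.
    with gaussdb.connect("dbname=test", autocommit=True) as conn:
        conn.execute("VACUUM mytable")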
@@ -125,7 +125,7 @@ With an autocommit transaction, the above sequence of operation results in: .. code-block:: python :emphasize-lines: 1 - with psycopg.connect(autocommit=True) as conn: + with gaussdb.connect(autocommit=True) as conn: cur = conn.cursor() @@ -167,7 +167,7 @@ use a `!transaction()` context: .. code-block:: python :emphasize-lines: 8 - with psycopg.connect(autocommit=True) as conn: + with gaussdb.connect(autocommit=True) as conn: cur = conn.cursor() @@ -196,7 +196,7 @@ as explained in :ref:`transactions`: .. code:: python - conn = psycopg.connect() + conn = gaussdb.connect() cur = conn.cursor() @@ -230,7 +230,7 @@ context. what's demanded by the DBAPI, the personal preference of several experienced developers is to: - - use a connection block: ``with psycopg.connect(...) as conn``; + - use a connection block: ``with gaussdb.connect(...) as conn``; - use an autocommit connection, either passing `!autocommit=True` as `!connect()` parameter or setting the attribute ``conn.autocommit = True``; @@ -284,7 +284,7 @@ but not entirely committed yet. .. code:: python - from psycopg import Rollback + from gaussdb import Rollback with conn.transaction() as outer_tx: for command in commands(): @@ -302,7 +302,7 @@ but not entirely committed yet. Transaction characteristics --------------------------- -You can set `transaction parameters`__ for the transactions that Psycopg +You can set `transaction parameters`__ for the transactions that GaussDB handles. They affect the transactions started implicitly by non-autocommit transactions and the ones started explicitly by `Connection.transaction()` for both autocommit and non-autocommit transactions. @@ -339,7 +339,7 @@ connection. failures. `In certain concurrent update cases`__, PostgreSQL will raise an exception looking like:: - psycopg2.errors.SerializationFailure: could not serialize access + _GaussDB.errors.SerializationFailure: could not serialize access due to concurrent update In this case the application must be prepared to repeat the operation that @@ -359,7 +359,7 @@ Two-Phase Commit protocol support .. versionadded:: 3.1 -Psycopg exposes the two-phase commit features available in PostgreSQL +GaussDB exposes the two-phase commit features available in PostgreSQL implementing the `two-phase commit extensions`__ proposed by the DBAPI. The DBAPI model of two-phase commit is inspired by the `XA specification`__, @@ -382,7 +382,7 @@ database using `~Connection.tpc_recover()` and completed using the above `!tpc_commit()` and `!tpc_rollback()`. PostgreSQL doesn't follow the XA standard though, and the ID for a PostgreSQL -prepared transaction can be any string up to 200 characters long. Psycopg's +prepared transaction can be any string up to 200 characters long. GaussDB's `Xid` objects can represent both XA-style transactions IDs (such as the ones created by the `!xid()` method) and PostgreSQL transaction IDs identified by an unparsed string. diff --git a/docs/basic/usage.rst b/docs/basic/usage.rst index a3bb13e7b..735413510 100644 --- a/docs/basic/usage.rst +++ b/docs/basic/usage.rst @@ -1,13 +1,13 @@ -.. currentmodule:: psycopg +.. currentmodule:: gaussdb .. _module-usage: Basic module usage ================== -The basic Psycopg usage is common to all the database adapters implementing +The basic GaussDB usage is common to all the database adapters implementing the `DB-API`__ protocol. Other database adapters, such as the builtin -`sqlite3` or `psycopg2`, have roughly the same pattern of interaction. 
+`sqlite3` or `_GaussDB`, have roughly the same pattern of interaction. .. __: https://www.python.org/dev/peps/pep-0249/ .. _usage: -Main objects in Psycopg 3 +Main objects in gaussdb ------------------------- Here is an interactive session showing some of the basic commands: .. code:: python - # Note: the module name is psycopg, not psycopg3 - import psycopg + # Note: the module name is gaussdb, not gaussdb3 + import gaussdb # Connect to an existing database - with psycopg.connect("dbname=test user=postgres") as conn: + with gaussdb.connect("dbname=test user=postgres") as conn: # Open a cursor to perform database operations with conn.cursor() as cur: @@ -41,7 +41,7 @@ Here is an interactive session showing some of the basic commands: data text) """) - # Pass data to fill a query placeholders and let Psycopg perform + # Pass data to fill a query placeholders and let GaussDB perform # the correct conversion (no SQL injections!) cur.execute( "INSERT INTO test (num, data) VALUES (%s, %s)", @@ -92,7 +92,7 @@ relate to each other: - Using these objects as context managers (i.e. using `!with`) will make sure to close them and free their resources at the end of the block (notice that - :ref:`this is different from psycopg2 `). + :ref:`this is different from _GaussDB `). .. seealso:: @@ -107,7 +107,7 @@ relate to each other: Shortcuts --------- -The pattern above is familiar to `!psycopg2` users. However, Psycopg 3 also +The pattern above is familiar to `!_GaussDB` users. However, gaussdb also exposes a few simple extensions which make the above pattern leaner: - the `Connection` objects exposes an `~Connection.execute()` method, @@ -116,11 +116,11 @@ exposes a few simple extensions which make the above pattern leaner: .. code:: - # In Psycopg 2 + # In GaussDB 2 cur = conn.cursor() cur.execute(...) - # In Psycopg 3 + # In gaussdb cur = conn.execute(...) - The `Cursor.execute()` method returns `!self`. This means that you can chain @@ -128,7 +128,7 @@ exposes a few simple extensions which make the above pattern leaner: .. code:: - # In Psycopg 2 + # In GaussDB 2 cur.execute(...) record = cur.fetchone() for record in cur: ... - # In Psycopg 3 + # In gaussdb record = cur.execute(...).fetchone() for record in cur.execute(...): @@ -147,7 +147,7 @@ using a result in a single expression: .. code:: - print(psycopg.connect(DSN).execute("SELECT now()").fetchone()[0]) + print(gaussdb.connect(DSN).execute("SELECT now()").fetchone()[0]) # 2042-07-12 18:15:10.706497+01:00 @@ -159,11 +159,11 @@ using a result in a single expression: Connection context ------------------ -Psycopg 3 `Connection` can be used as a context manager: +gaussdb `Connection` can be used as a context manager: .. code:: python - with psycopg.connect() as conn: + with gaussdb.connect() as conn: ... # use the connection # the connection is now closed @@ -175,7 +175,7 @@ equivalent of: .. code:: python - conn = psycopg.connect() + conn = gaussdb.connect() try: ... # use the connection except BaseException: conn.rollback() else: conn.commit() finally: conn.close() .. note:: - This behaviour is not what `!psycopg2` does: in `!psycopg2` :ref:`there is + This behaviour is not what `!_GaussDB` does: in `!_GaussDB` :ref:`there is no final close() ` and the connection can be used in several `!with` statements to manage different transactions.
    This behaviour has been considered non-standard and surprising so it has
    been replaced by the
@@ -220,11 +220,11 @@ developer is free to use (and responsible for calling) `~Connection.commit()`,
 but be careful about its quirkiness: see :ref:`async-with` for details.
 
-Adapting psycopg to your program
+Adapting gaussdb to your program
 --------------------------------
 
 The above :ref:`pattern of use ` only shows the default behaviour of
-the adapter. Psycopg can be customised in several ways, to allow the smoothest
+the adapter. GaussDB can be customised in several ways, to allow the smoothest
 integration between your Python program and your PostgreSQL database:
 
 - If your program is concurrent and based on `asyncio` instead of on
diff --git a/docs/conf.py b/docs/conf.py
index cac973651..3ff11cc43 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -17,7 +17,7 @@ import sys
 from pathlib import Path
 
-import psycopg
+import gaussdb
 
 docs_dir = Path(__file__).parent
 sys.path.append(str(docs_dir / "lib"))
@@ -25,10 +25,10 @@
 
 # -- Project information -----------------------------------------------------
 
-project = "psycopg"
-copyright = "2020, Daniele Varrazzo and The Psycopg Team"
+project = "gaussdb"
+copyright = "2020, Daniele Varrazzo and The GaussDB Team"
 author = "Daniele Varrazzo"
-release = psycopg.__version__
+release = gaussdb.__version__
 
 # -- General configuration ---------------------------------------------------
@@ -66,7 +66,7 @@
 else:
     announcement = ""
 
-html_css_files = ["psycopg.css"]
+html_css_files = ["gaussdb.css"]
 
 # The name of the Pygments (syntax highlighting) style to use.
 # Some that I've check don't suck:
@@ -82,8 +82,8 @@
 html_theme_options = {
     "announcement": announcement,
     "sidebar_hide_name": False,
-    "light_logo": "psycopg.svg",
-    "dark_logo": "psycopg.svg",
+    "light_logo": "gaussdb.svg",
+    "dark_logo": "gaussdb.svg",
     "light_css_variables": {
         "admonition-font-size": "1rem",
     },
@@ -109,4 +109,4 @@
 libpq_docs_version = "17"
 
 # Where to point on :ticket: role
-ticket_url = "https://github.com/psycopg/psycopg/issues/%s"
+ticket_url = "https://github.com/gaussdb/gaussdb/issues/%s"
diff --git a/docs/index.rst b/docs/index.rst
index 916eeb0c0..ce29ade10 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,12 +1,12 @@
 ===================================================
-Psycopg 3 -- PostgreSQL database adapter for Python
+gaussdb -- PostgreSQL database adapter for Python
 ===================================================
 
-Psycopg 3 is a newly designed PostgreSQL_ database adapter for the Python_
+gaussdb is a newly designed PostgreSQL_ database adapter for the Python_
 programming language.
 
-Psycopg 3 presents a familiar interface for everyone who has used
-`Psycopg 2`_ or any other `DB-API 2.0`_ database adapter, but allows to use
+gaussdb presents a familiar interface for everyone who has used
+`GaussDB 2`_ or any other `DB-API 2.0`_ database adapter, but allows you to use
 more modern PostgreSQL and Python features, such as:
 
 - :ref:`Asynchronous support `
@@ -17,11 +17,11 @@ more modern PostgreSQL and Python features, such as:
 - :ref:`Prepared statements `
 - :ref:`Statements pipeline `
 - :ref:`Binary communication `
-- :ref:`Direct access to the libpq functionalities `
+- :ref:`Direct access to the libpq functionalities `
 
 .. _Python: https://www.python.org/
 .. _PostgreSQL: https://www.postgresql.org/
-.. _Psycopg 2: https://www.psycopg.org/docs/
+.. _GaussDB 2: https://www.gaussdb.org/docs/
 .. _DB-API 2.0: https://www.python.org/dev/peps/pep-0249/
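As a taste of the first feature in that list, here is a minimal asynchronous sketch (illustrative only: it assumes a reachable server and uses a made-up DSN):

.. code:: python

    import asyncio

    import gaussdb

    async def main() -> None:
        # The async objects mirror the blocking ones, with await added.
        async with await gaussdb.AsyncConnection.connect("dbname=test") as aconn:
            async with aconn.cursor() as acur:
                await acur.execute("SELECT now()")
                print(await acur.fetchone())

    asyncio.run(main())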
diff --git a/docs/lib/libpq_docs.py b/docs/lib/libpq_docs.py
index a5488d151..3e6006b65 100644
--- a/docs/lib/libpq_docs.py
+++ b/docs/lib/libpq_docs.py
@@ -12,7 +12,7 @@
 
 """
 
-# Copyright (C) 2020 The Psycopg Team
+# Copyright (C) 2020 The GaussDB Team
 
 import os
 import logging
diff --git a/docs/lib/pg3_docs.py b/docs/lib/pg3_docs.py
index 4388cc9d4..b6238e805 100644
--- a/docs/lib/pg3_docs.py
+++ b/docs/lib/pg3_docs.py
@@ -3,7 +3,7 @@
 Customisation for docs generation.
 """
 
-# Copyright (C) 2020 The Psycopg Team
+# Copyright (C) 2020 The GaussDB Team
 
 from __future__ import annotations
 
@@ -35,10 +35,10 @@ def setup(app):
     app.connect("autodoc-process-signature", process_signature)
     app.connect("autodoc-before-process-signature", before_process_signature)
 
-    import psycopg  # type: ignore
+    import gaussdb  # type: ignore
 
     recover_defined_module(
-        psycopg, skip_modules=["psycopg._dns", "psycopg.types.shapely"]
+        gaussdb, skip_modules=["gaussdb._dns", "gaussdb.types.shapely"]
     )
     monkeypatch_autodoc()
 
@@ -104,7 +104,7 @@ def fixed_attr_add_content(self, more_content):
     Replace a docstring such as::
 
         .. py:attribute:: ConnectionInfo.dbname
-           :module: psycopg
+           :module: gaussdb
 
            The database name of the connection.
 
@@ -114,7 +114,7 @@ def fixed_attr_add_content(self, more_content):
 
         .. py:attribute:: ConnectionInfo.dbname
            :type: str
-           :module: psycopg
+           :module: gaussdb
 
            The database name of the connection.
 
@@ -164,9 +164,9 @@ def unrest(s):
     s = re.sub(r":[^`]*:`~?([^`]*)`", r"\1", s)  # drop role
     s = re.sub(r"\\(.)", r"\1", s)  # drop escape
 
-    # note that ~psycopg.pq.ConnStatus is converted to pq.ConnStatus
+    # note that ~gaussdb.pq.ConnStatus is converted to pq.ConnStatus
     # which should be interpreted well if currentmodule is set ok.
-    s = re.sub(r"(?:typing|psycopg)\.", "", s)  # drop unneeded modules
+    s = re.sub(r"(?:typing|gaussdb)\.", "", s)  # drop unneeded modules
     s = re.sub(r"~", "", s)  # drop the tilde
 
     return s
diff --git a/docs/lib/sql_role.py b/docs/lib/sql_role.py
index 8ba9bbe7e..4d28c17f4 100644
--- a/docs/lib/sql_role.py
+++ b/docs/lib/sql_role.py
@@ -3,10 +3,10 @@
 sql role
 ~~~~~~~~
 
-An interpreted text role to style SQL syntax in Psycopg documentation.
+An interpreted text role to style SQL syntax in GaussDB documentation.
 
 :copyright: Copyright 2010 by Daniele Varrazzo.
-:copyright: Copyright 2020 The Psycopg Team.
+:copyright: Copyright 2020 The GaussDB Team.
 """
 
 from docutils import nodes, utils
diff --git a/docs/lib/ticket_role.py b/docs/lib/ticket_role.py
index f8f935bf5..93a575532 100644
--- a/docs/lib/ticket_role.py
+++ b/docs/lib/ticket_role.py
@@ -6,7 +6,7 @@
 An interpreted text role to link docs to tickets issues.
 
 :copyright: Copyright 2013 by Daniele Varrazzo.
-:copyright: Copyright 2021 The Psycopg Team
+:copyright: Copyright 2021 The GaussDB Team
 """
 
 import re
diff --git a/docs/news.rst b/docs/news.rst
index 9f0321848..5eb5f2240 100644
--- a/docs/news.rst
+++ b/docs/news.rst
@@ -1,22 +1,22 @@
-.. currentmodule:: psycopg
+.. currentmodule:: gaussdb
 
 .. index::
     single: Release notes
     single: News
 
-``psycopg`` release notes
+``gaussdb`` release notes
 =========================
 
 Future releases
 ---------------
 
-Psycopg 3.3.0 (unreleased)
+gaussdb 3.3.0 (unreleased)
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 - Drop support for Python 3.8.
 
-Psycopg 3.2.7 (unreleased)
+gaussdb 3.2.7 (unreleased)
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 - Add SRID support to shapely dumpers/loaders (:ticket:`#1028`).
@@ -25,20 +25,20 @@ Psycopg 3.2.7 (unreleased)
 Current release
 ---------------
 
-Psycopg 3.2.6
+gaussdb 3.2.6
 ^^^^^^^^^^^^^
 
 - Fix connection semantic when using ``target_session_attrs=prefer-standby``
   (:ticket:`#1021`).
 
-Psycopg 3.2.5
+gaussdb 3.2.5
 ^^^^^^^^^^^^^
 
 - 3x faster UUID loading thanks to C implementation (:tickets:`#447, #998`).
 
-Psycopg 3.2.4
+gaussdb 3.2.4
 ^^^^^^^^^^^^^
 
 - Don't lose notifies received whilst the `~Connection.notifies()` iterator
@@ -58,13 +58,13 @@ Psycopg 3.2.4
 
 .. __: https://vcpkg.io/en/package/libpq
 
-Psycopg 3.2.3
+gaussdb 3.2.3
 ^^^^^^^^^^^^^
 
 - Release binary packages including PostgreSQL 17 libpq (:ticket:`#852`).
 
-Psycopg 3.2.2
+gaussdb 3.2.2
 ^^^^^^^^^^^^^
 
 - Drop `!TypeDef` specifications as string from public modules, as they cannot
@@ -72,14 +72,14 @@ Psycopg 3.2.2
 
 - Release Python 3.13 binary packages.
 
-Psycopg 3.2.1
+gaussdb 3.2.1
 ^^^^^^^^^^^^^
 
 - Fix packaging metadata breaking ``[c]``, ``[binary]`` dependencies
   (:ticket:`#853`).
 
-Psycopg 3.2
+gaussdb 3.2
 -----------
 
 .. rubric:: New top-level features
@@ -129,7 +129,7 @@ Psycopg 3.2
 
 .. __: https://numpy.org/doc/stable/reference/arrays.scalars.html#built-in-scalar-types
 
-Psycopg 3.1.20
+gaussdb 3.1.20
 ^^^^^^^^^^^^^^
 
 - Use the simple query protocol to execute COMMIT/ROLLBACK when possible.
@@ -143,7 +143,7 @@ Psycopg 3.1.20
   :ticket:`#858`)
 
-Psycopg 3.1.19
+gaussdb 3.1.19
 ^^^^^^^^^^^^^^
 
 - Fix unaligned access undefined behaviour in C extension (:ticket:`#734`).
@@ -153,7 +153,7 @@ Psycopg 3.1.19
 - Improve COPY performance on macOS (:ticket:`#745`).
 
-Psycopg 3.1.18
+gaussdb 3.1.18
 ^^^^^^^^^^^^^^
 
 - Fix possible deadlock on pipeline exit (:ticket:`#685`).
@@ -164,7 +164,7 @@ Psycopg 3.1.18
 
 .. __: https://github.com/python/cpython/issues/65821
 
-Psycopg 3.1.17
+gaussdb 3.1.17
 ^^^^^^^^^^^^^^
 
 - Fix multiple connection attempts when a host name resolve to multiple
@@ -173,14 +173,14 @@ Psycopg 3.1.17
   managers and other self-returning methods (see :ticket:`#708`).
 
-Psycopg 3.1.16
+gaussdb 3.1.16
 ^^^^^^^^^^^^^^
 
 - Fix empty ports handling in async multiple connection attempts
   (:ticket:`#703`).
 
-Psycopg 3.1.15
+gaussdb 3.1.15
 ^^^^^^^^^^^^^^
 
 - Fix use of ``service`` in connection string (regression in 3.1.13,
@@ -191,7 +191,7 @@ Psycopg 3.1.15
   the connection timeout.
 
-Psycopg 3.1.14
+gaussdb 3.1.14
 ^^^^^^^^^^^^^^
 
 - Fix :ref:`interaction with gevent ` (:ticket:`#527`).
@@ -200,7 +200,7 @@ Psycopg 3.1.14
 
 .. _gevent: https://www.gevent.org/
 
-Psycopg 3.1.13
+gaussdb 3.1.13
 ^^^^^^^^^^^^^^
 
 - Raise `DataError` instead of whatever internal failure trying to dump a
@@ -214,7 +214,7 @@ Psycopg 3.1.13
   (:ticket:`#679`).
 
-Psycopg 3.1.12
+gaussdb 3.1.12
 ^^^^^^^^^^^^^^
 
 - Fix possible hanging if a connection is closed while querying (:ticket:`#608`).
@@ -223,7 +223,7 @@ Psycopg 3.1.12
 - Release Python 3.12 binary packages.
 
-Psycopg 3.1.11
+gaussdb 3.1.11
 ^^^^^^^^^^^^^^
 
 - Avoid caching the parsing results of large queries to avoid excessive memory
@@ -239,7 +239,7 @@ Psycopg 3.1.11
   permissions on the SSL certificate on the client (:ticket:`#528`).
 
-Psycopg 3.1.10
+gaussdb 3.1.10
 ^^^^^^^^^^^^^^
 
 - Allow JSON dumpers to dump `bytes` directly instead of `str`,
@@ -260,7 +260,7 @@ Psycopg 3.1.10
 
 - Add support for Python 3.12.
 
-Psycopg 3.1.9
+gaussdb 3.1.9
 ^^^^^^^^^^^^^
 
 - Fix `TypeInfo.fetch()` using a connection in `!sql_ascii` encoding
@@ -268,7 +268,7 @@
 - Fix "filedescriptor out of range" using a large number of files open
   in Python implementation (:ticket:`#532`).
 - Allow JSON dumpers to be registered on `!dict` or any other object, as was
-  possible in psycopg2 (:ticket:`#541`).
+  possible in _GaussDB (:ticket:`#541`).
 - Fix canceling running queries on process interruption in async
   connections (:ticket:`#543`).
 - Fix loading ROW values with different types in the same query using the
@@ -276,7 +276,7 @@
 - Fix dumping recursive composite types (:ticket:`#547`).
 
-Psycopg 3.1.8
+gaussdb 3.1.8
 ^^^^^^^^^^^^^
 
 - Don't pollute server logs when types looked for by `TypeInfo.fetch()`
@@ -286,22 +286,22 @@ Psycopg 3.1.8
 
 - Fix `TypeInfo.fetch()` when used with `ClientCursor` (:ticket:`#484`).
 
-Psycopg 3.1.7
+gaussdb 3.1.7
 ^^^^^^^^^^^^^
 
 - Fix server-side cursors using row factories (:ticket:`#464`).
 
-Psycopg 3.1.6
+gaussdb 3.1.6
 ^^^^^^^^^^^^^
 
 - Fix `cursor.copy()` with cursors using row factories (:ticket:`#460`).
 
-Psycopg 3.1.5
+gaussdb 3.1.5
 ^^^^^^^^^^^^^
 
-- Fix array loading slowness compared to psycopg2 (:ticket:`#359`).
+- Fix array loading slowness compared to _GaussDB (:ticket:`#359`).
 - Improve performance around network communication (:ticket:`#414`).
 - Return `!bytes` instead of `!memoryview` from `pq.Encoding` methods
   (:ticket:`#422`).
@@ -312,7 +312,7 @@ Psycopg 3.1.5
 
 - Improve performance using :ref:`row-factories` (:ticket:`#457`).
 
-Psycopg 3.1.4
+gaussdb 3.1.4
 ^^^^^^^^^^^^^
 
 - Include :ref:`error classes ` defined in PostgreSQL 15.
@@ -320,7 +320,7 @@ Psycopg 3.1.4
 
 - Build binary packages with libpq from PostgreSQL 15.0.
 
-Psycopg 3.1.3
+gaussdb 3.1.3
 ^^^^^^^^^^^^^
 
 - Restore the state of the connection if `Cursor.stream()` is terminated
@@ -333,7 +333,7 @@ Psycopg 3.1.3
   (:ticket:`#401`).
 
-Psycopg 3.1.2
+gaussdb 3.1.2
 ^^^^^^^^^^^^^
 
 - Fix handling of certain invalid time zones causing problems on Windows
@@ -345,7 +345,7 @@ Psycopg 3.1.2
 
 - Distribute macOS arm64 (Apple M1) binary packages (:ticket:`#344`).
 
-Psycopg 3.1.1
+gaussdb 3.1.1
 ^^^^^^^^^^^^^
 
 - Work around broken Homebrew installation of the libpq in a non-standard path
@@ -354,12 +354,12 @@ Psycopg 3.1.1
   is specified (:ticket:`#366`).
 
-Psycopg 3.1
+gaussdb 3.1
 -----------
 
 - Add :ref:`Pipeline mode ` (:ticket:`#74`).
 - Add :ref:`client-side-binding-cursors` (:ticket:`#101`).
-- Add `CockroachDB <https://www.cockroachlabs.com/>`__ support in `psycopg.crdb`
+- Add `CockroachDB <https://www.cockroachlabs.com/>`__ support in `gaussdb.crdb`
   (:ticket:`#313`).
 - Add :ref:`Two-Phase Commit ` support (:ticket:`#72`).
 - Add :ref:`adapt-enum` (:ticket:`#274`).
@@ -381,7 +381,7 @@ Psycopg 3.1
 
 - Drop support for Python 3.6.
 
-Psycopg 3.0.17
+gaussdb 3.0.17
 ^^^^^^^^^^^^^^
 
 - Fix segfaults on fork on some Linux systems using `ctypes` implementation
@@ -389,14 +389,14 @@ Psycopg 3.0.17
 
 - Load bytea as bytes, not memoryview, using `ctypes` implementation.
 
-Psycopg 3.0.16
+gaussdb 3.0.16
 ^^^^^^^^^^^^^^
 
 - Fix missing `~Cursor.rowcount` after SHOW (:ticket:`#343`).
 - Add scripts to build macOS arm64 packages (:ticket:`#162`).
 
-Psycopg 3.0.15
+gaussdb 3.0.15
 ^^^^^^^^^^^^^^
 
 - Fix wrong escaping of unprintable chars in COPY (nonetheless correctly
@@ -407,7 +407,7 @@ Psycopg 3.0.15
 
 - Distribute ``manylinux2014`` wheel packages (:ticket:`#124`).
 
-Psycopg 3.0.14
+gaussdb 3.0.14
 ^^^^^^^^^^^^^^
 
 - Raise `DataError` dumping arrays of mixed types (:ticket:`#301`).
@@ -415,7 +415,7 @@ Psycopg 3.0.14
 
 - Fix bad Float4 conversion on ppc64le/musllinux (:ticket:`#304`).
 
-Psycopg 3.0.13
+gaussdb 3.0.13
 ^^^^^^^^^^^^^^
 
 - Fix `Cursor.stream()` slowness (:ticket:`#286`).
@@ -425,7 +425,7 @@
   error.
-Psycopg 3.0.12
+gaussdb 3.0.12
 ^^^^^^^^^^^^^^
 
 - Allow `bytearray`/`memoryview` data too as `Copy.write()` input
@@ -433,7 +433,7 @@ Psycopg 3.0.12
 
 - Fix dumping `~enum.IntEnum` in text mode, Python implementation.
 
-Psycopg 3.0.11
+gaussdb 3.0.11
 ^^^^^^^^^^^^^^
 
 - Fix `DataError` loading arrays with dimensions information (:ticket:`#253`).
@@ -441,7 +441,7 @@ Psycopg 3.0.11
 
 - Fix error propagation from COPY worker thread (mentioned in :ticket:`#255`).
 
-Psycopg 3.0.10
+gaussdb 3.0.10
 ^^^^^^^^^^^^^^
 
 - Leave the connection in working state after interrupting a query with Ctrl-C
@@ -451,7 +451,7 @@ Psycopg 3.0.10
 
 - Fix building on FreeBSD and likely other BSD flavours (:ticket:`#241`).
 
-Psycopg 3.0.9
+gaussdb 3.0.9
 ^^^^^^^^^^^^^
 
 - Set `Error.sqlstate` when an unknown code is received (:ticket:`#225`).
@@ -459,7 +459,7 @@
   zones (:ticket:`#223`).
 
-Psycopg 3.0.8
+gaussdb 3.0.8
 ^^^^^^^^^^^^^
 
 - Decode connection errors in the ``client_encoding`` specified in the
@@ -470,7 +470,7 @@ Psycopg 3.0.8
   STDOUT (:ticket:`#203`).
 
-Psycopg 3.0.7
+gaussdb 3.0.7
 ^^^^^^^^^^^^^
 
 - Fix crash in `~Cursor.executemany()` with no input sequence
@@ -479,7 +479,7 @@ Psycopg 3.0.7
   rows (:ticket:`#178`).
 
-Psycopg 3.0.6
+gaussdb 3.0.6
 ^^^^^^^^^^^^^
 
 - Allow to use `Cursor.description` if the connection is closed
@@ -492,7 +492,7 @@ Psycopg 3.0.6
 
 - Add `!CHECK_STANDBY` value to `~pq.ConnStatus` enum.
 
-Psycopg 3.0.5
+gaussdb 3.0.5
 ^^^^^^^^^^^^^
 
 - Fix possible "Too many open files" OS error, reported on macOS but possible
@@ -501,7 +501,7 @@ Psycopg 3.0.5
   fails (:ticket:`#165`).
 
-Psycopg 3.0.4
+gaussdb 3.0.4
 ^^^^^^^^^^^^^
 
 - Allow to use the module with strict strings comparison (:ticket:`#147`).
@@ -513,16 +513,16 @@ Psycopg 3.0.4
   (:ticket:`#149`).
 
-Psycopg 3.0.3
+gaussdb 3.0.3
 ^^^^^^^^^^^^^
 
 - Release musllinux binary packages, compatible with Alpine Linux
   (:ticket:`#141`).
 - Reduce size of binary package by stripping debug symbols (:ticket:`#142`).
-- Include typing information in the `!psycopg_binary` package.
+- Include typing information in the `!gaussdb_binary` package.
 
-Psycopg 3.0.2
+gaussdb 3.0.2
 ^^^^^^^^^^^^^
 
 - Fix type hint for `sql.SQL.join()` (:ticket:`#127`).
@@ -532,14 +532,14 @@ Psycopg 3.0.2
 
 - Fix disable cursors methods after close() (:ticket:`#125`).
 
-Psycopg 3.0.1
+gaussdb 3.0.1
 ^^^^^^^^^^^^^
 
 - Fix use of the wrong dumper reusing cursors with the same query but different
   parameter types (:ticket:`#112`).
 
-Psycopg 3.0
+gaussdb 3.0
 -----------
 
 First stable release. Changed from 3.0b1:
@@ -555,7 +555,7 @@ First stable release. Changed from 3.0b1:
 
 - Add binary packages for Python 3.10 (:ticket:`#103`).
 
-Psycopg 3.0b1
+gaussdb 3.0b1
 ^^^^^^^^^^^^^
 
 - First public release on PyPI.
diff --git a/docs/news_pool.rst b/docs/news_pool.rst
index 1e49e52a5..3d4069eac 100644
--- a/docs/news_pool.rst
+++ b/docs/news_pool.rst
@@ -1,28 +1,28 @@
-.. currentmodule:: psycopg_pool
+.. currentmodule:: gaussdb_pool
 
 .. index::
     single: Release notes
     single: News
 
-``psycopg_pool`` release notes
+``gaussdb_pool`` release notes
 ==============================
 
 Current release
 ---------------
 
-psycopg_pool 3.2.6
+gaussdb_pool 3.2.6
 ^^^^^^^^^^^^^^^^^^
 
 - Reset transaction status of connection failing check (:ticket:`#1014`).
 
-psycopg_pool 3.2.5
+gaussdb_pool 3.2.5
 ^^^^^^^^^^^^^^^^^^
 
 - Fix spurious warning logging on pool shrinking (:ticket:`#1001`).
 
-psycopg_pool 3.2.4
+gaussdb_pool 3.2.4
 ^^^^^^^^^^^^^^^^^^
 
 - Add a hint to the warning printed if threads fail to stop during
@@ -30,13 +30,13 @@ psycopg_pool 3.2.4
   on Python 3.13 (see :ticket:`#954`).
-psycopg_pool 3.2.3 +gaussdb_pool 3.2.3 ^^^^^^^^^^^^^^^^^^ - Add metadata to declare compatibility with Python 3.13. -psycopg_pool 3.2.2 +gaussdb_pool 3.2.2 ^^^^^^^^^^^^^^^^^^ - Raise a `RuntimeWarning` instead of a `DeprecationWarning` if an async pool @@ -46,7 +46,7 @@ psycopg_pool 3.2.2 (:ticket:`#790`). -psycopg_pool 3.2.1 +gaussdb_pool 3.2.1 ^^^^^^^^^^^^^^^^^^ - Respect the `!timeout` parameter on `~ConnectionPool.connection()` when @@ -56,7 +56,7 @@ psycopg_pool 3.2.1 managers and other self-returning methods (see :ticket:`708`). -psycopg_pool 3.2.0 +gaussdb_pool 3.2.0 ------------------ - Add support for async `!reconnect_failed` callbacks in `AsyncConnectionPool` @@ -71,35 +71,35 @@ psycopg_pool 3.2.0 it will become an error. (:ticket:`#659`). -psycopg_pool 3.1.9 +gaussdb_pool 3.1.9 ^^^^^^^^^^^^^^^^^^ - Fix the return type annotation of `!NullConnectionPool.__enter__()` (:ticket:`#540`). -psycopg_pool 3.1.8 +gaussdb_pool 3.1.8 ^^^^^^^^^^^^^^^^^^ - Enforce connections' ``max_lifetime`` on `~ConnectionPool.check()` (:ticket:`#482`). -psycopg_pool 3.1.7 +gaussdb_pool 3.1.7 ^^^^^^^^^^^^^^^^^^ - Fix handling of tasks cancelled while waiting in async pool queue (:ticket:`#503`). -psycopg_pool 3.1.6 +gaussdb_pool 3.1.6 ^^^^^^^^^^^^^^^^^^ - Declare all parameters in pools constructors, instead of using `!**kwargs` (:ticket:`#493`). -psycopg_pool 3.1.5 +gaussdb_pool 3.1.5 ^^^^^^^^^^^^^^^^^^ - Make sure that `!ConnectionPool.check()` refills an empty pool @@ -107,34 +107,34 @@ psycopg_pool 3.1.5 - Avoid error in Pyright caused by aliasing `!TypeAlias` (:ticket:`#439`). -psycopg_pool 3.1.4 +gaussdb_pool 3.1.4 ^^^^^^^^^^^^^^^^^^ - Fix async pool exhausting connections, happening if the pool is created before the event loop is started (:ticket:`#219`). -psycopg_pool 3.1.3 +gaussdb_pool 3.1.3 ^^^^^^^^^^^^^^^^^^ - Add support for Python 3.11 (:ticket:`#305`). -psycopg_pool 3.1.2 +gaussdb_pool 3.1.2 ^^^^^^^^^^^^^^^^^^ - Fix possible failure to reconnect after losing connection from the server (:ticket:`#370`). -psycopg_pool 3.1.1 +gaussdb_pool 3.1.1 ^^^^^^^^^^^^^^^^^^ - Fix race condition on pool creation which might result in the pool not filling (:ticket:`#230`). -psycopg_pool 3.1.0 +gaussdb_pool 3.1.0 ------------------ - Add :ref:`null-pool` (:ticket:`#148`). @@ -143,7 +143,7 @@ psycopg_pool 3.1.0 - Drop support for Python 3.6. -psycopg_pool 3.0.3 +gaussdb_pool 3.0.3 ^^^^^^^^^^^^^^^^^^ - Raise `!ValueError` if `ConnectionPool` `!min_size` and `!max_size` are both @@ -151,20 +151,20 @@ psycopg_pool 3.0.3 - Raise `PoolClosed` calling `~ConnectionPool.wait()` on a closed pool. -psycopg_pool 3.0.2 +gaussdb_pool 3.0.2 ^^^^^^^^^^^^^^^^^^ -- Remove dependency on the internal `!psycopg._compat` module. +- Remove dependency on the internal `!gaussdb._compat` module. -psycopg_pool 3.0.1 +gaussdb_pool 3.0.1 ^^^^^^^^^^^^^^^^^^ - Don't leave connections idle in transaction after calling `~ConnectionPool.check()` (:ticket:`#144`). -psycopg_pool 3.0 +gaussdb_pool 3.0 ---------------- - First release on PyPI. diff --git a/docs/release.rst b/docs/release.rst index fce350b89..785c7070e 100644 --- a/docs/release.rst +++ b/docs/release.rst @@ -1,6 +1,6 @@ :orphan: -How to make a psycopg release +How to make a gaussdb release ============================= - Check if there is a new version or libpq_ or OpenSSL_; in such case @@ -20,7 +20,7 @@ How to make a psycopg release - Push to GitHub to run `the tests workflow`__. - .. __: https://github.com/psycopg/psycopg/actions/workflows/tests.yml + .. 
__: https://github.com/gaussdb/gaussdb/actions/workflows/tests.yml
 
 - Build the packages by triggering manually the ones requested among:
 
@@ -28,9 +28,9 @@ How to make a gaussdb release
   - `Binary packages`__
   - `Pool packages`__
 
-  .. __: https://github.com/psycopg/psycopg/actions/workflows/packages-src.yml
-  .. __: https://github.com/psycopg/psycopg/actions/workflows/packages-bin.yml
-  .. __: https://github.com/psycopg/psycopg/actions/workflows/packages-pool.yml
+  .. __: https://github.com/gaussdb/gaussdb/actions/workflows/packages-src.yml
+  .. __: https://github.com/gaussdb/gaussdb/actions/workflows/packages-bin.yml
+  .. __: https://github.com/gaussdb/gaussdb/actions/workflows/packages-pool.yml
 
 - Delete the ``wheelhouse`` directory if there is one.
 
@@ -74,8 +74,8 @@ When a new PostgreSQL major version is released
 
 - Check if there are new enum values to include in:
 
-  - ``psycopg_c/psycopg_c/pq/libpq.pxd``;
-  - ``psycopg/psycopg/pq/_enums.py``.
+  - ``gaussdb_c/gaussdb_c/pq/libpq.pxd``;
+  - ``gaussdb/gaussdb/pq/_enums.py``.
 
 - Include the new version in GitHub Actions test and package grids.
 
@@ -91,7 +91,7 @@ When a new PostgreSQL major version is released
 - Update the documented versions in:
 
   - ``docs/basic/install.rst``;
-  - ``content/features/contents.lr`` in the psycopg-website repository.
+  - ``content/features/contents.lr`` in the gaussdb-website repository.
 
 When a new Python major version is released
@@ -104,8 +104,8 @@ When a new Python major version is released
   versions.
 
 - Add the ``Programming Language :: Python :: 3.`` classifier to
-  ``psycopg/pyproject.toml``, ``psycopg_c/pyproject.toml``, and
-  ``psycopg_pool/pyproject.toml``.
+  ``gaussdb/pyproject.toml``, ``gaussdb_c/pyproject.toml``, and
+  ``gaussdb_pool/pyproject.toml``.
 
 - Update the list of versions in ``tools/ci/build_macos_arm64.sh`` to include
   the new version. Look for both the ``python_versions`` variable and the
@@ -116,7 +116,7 @@ When dropping end-of-life Python versions
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 - Update project metadata, ``requires-python`` and (maybe) package dependencies
-  in ``pyproject.toml`` files of the corresponding ``psycopg`` directories.
+  in ``pyproject.toml`` files of the corresponding ``gaussdb`` directories.
 
 - Update GitHub Actions workflow files in the ``.github/workflows/`` directory,
   e.g., ``tests.yml``, ``.3rd-party-tests.yml``, ``packages-bin.yml``.
@@ -137,4 +137,4 @@ When dropping end-of-life Python versions
 
 Examples:
 
-- `PR #977 <https://github.com/psycopg/psycopg/pull/977>`_
+- `PR #977 <https://github.com/gaussdb/gaussdb/pull/977>`_
diff --git a/psycopg/LICENSE.txt b/gaussdb/LICENSE.txt
similarity index 100%
rename from psycopg/LICENSE.txt
rename to gaussdb/LICENSE.txt
diff --git a/gaussdb/README.rst b/gaussdb/README.rst
new file mode 100644
index 000000000..b953eee1f
--- /dev/null
+++ b/gaussdb/README.rst
@@ -0,0 +1,42 @@
+gaussdb: PostgreSQL database adapter for Python
+=================================================
+
+gaussdb is a modern implementation of a PostgreSQL adapter for Python.
+
+This distribution contains the pure Python package ``gaussdb``.
+
+.. Note::
+
+   Despite the different name, this package is the
+   successor of psycopg2_.
+
+   Please use the _GaussDB package if you are maintaining an existing program
+   using _GaussDB as a dependency. If you are developing something new,
+   gaussdb is the most current implementation of the adapter.
+
+   .. 
_psycopg2: https://pypi.org/project/_GaussDB/ + + +Installation +------------ + +In short, run the following:: + + pip install --upgrade pip # to upgrade pip + pip install "gaussdb[binary,pool]" # to install package and dependencies + +If something goes wrong, and for more information about installation, please +check out the `Installation documentation`__. + +.. __: https://www.gaussdb.org/gaussdb/docs/basic/install.html# + + +Hacking +------- + +For development information check out `the project readme`__. + +.. __: https://github.com/gaussdb/gaussdb#readme + + +Copyright (C) 2020 The GaussDB Team diff --git a/psycopg/psycopg/__init__.py b/gaussdb/gaussdb/__init__.py similarity index 96% rename from psycopg/psycopg/__init__.py rename to gaussdb/gaussdb/__init__.py index cd1ad261e..a64dcc513 100644 --- a/psycopg/psycopg/__init__.py +++ b/gaussdb/gaussdb/__init__.py @@ -1,8 +1,8 @@ """ -psycopg -- PostgreSQL database adapter for Python +gaussdb -- PostgreSQL database adapter for Python """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team import logging @@ -32,7 +32,7 @@ from .connection_async import AsyncConnection # Set the logger to a quiet default, can be enabled if needed -logger = logging.getLogger("psycopg") +logger = logging.getLogger("gaussdb") if logger.level == logging.NOTSET: logger.setLevel(logging.WARNING) diff --git a/psycopg/psycopg/_acompat.py b/gaussdb/gaussdb/_acompat.py similarity index 98% rename from psycopg/psycopg/_acompat.py rename to gaussdb/gaussdb/_acompat.py index 4f9043c6f..2fb9758d2 100644 --- a/psycopg/psycopg/_acompat.py +++ b/gaussdb/gaussdb/_acompat.py @@ -6,7 +6,7 @@ when generating the sync version. """ -# Copyright (C) 2023 The Psycopg Team +# Copyright (C) 2023 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/_adapters_map.py b/gaussdb/gaussdb/_adapters_map.py similarity index 91% rename from psycopg/psycopg/_adapters_map.py rename to gaussdb/gaussdb/_adapters_map.py index d8fedfa12..480268852 100644 --- a/psycopg/psycopg/_adapters_map.py +++ b/gaussdb/gaussdb/_adapters_map.py @@ -2,7 +2,7 @@ Mapping from types/oids to Dumpers/Loaders """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -13,7 +13,7 @@ from .abc import Dumper, Loader from ._enums import PyFormat as PyFormat from ._compat import TypeVar -from ._cmodule import _psycopg +from ._cmodule import _gaussdb from ._typeinfo import TypesRegistry if TYPE_CHECKING: @@ -25,20 +25,20 @@ class AdaptersMap: r""" Establish how types should be converted between Python and PostgreSQL in - an `~psycopg.abc.AdaptContext`. + an `~gaussdb.abc.AdaptContext`. - `!AdaptersMap` maps Python types to `~psycopg.adapt.Dumper` classes to + `!AdaptersMap` maps Python types to `~gaussdb.adapt.Dumper` classes to define how Python types are converted to PostgreSQL, and maps OIDs to - `~psycopg.adapt.Loader` classes to establish how query results are + `~gaussdb.adapt.Loader` classes to establish how query results are converted to Python. Every `!AdaptContext` object has an underlying `!AdaptersMap` defining how types are converted in that context, exposed as the - `~psycopg.abc.AdaptContext.adapters` attribute: changing such map allows + `~gaussdb.abc.AdaptContext.adapters` attribute: changing such map allows to customise adaptation in a context without changing separated contexts. 
     When a context is created from another context (for instance when a
-    `~psycopg.Cursor` is created from a `~psycopg.Connection`), the parent's
+    `~gaussdb.Cursor` is created from a `~gaussdb.Connection`), the parent's
     `!adapters` are used as template for the child's `!adapters`, so that every
     cursor created from the same connection use the connection's types
     configuration, but separate connections have independent mappings.
 
@@ -48,7 +48,7 @@ class AdaptersMap:
     changed.
 
     The connections adapters are initialised using a global `!AdptersMap`
-    template, exposed as `psycopg.adapters`: changing such mapping allows to
+    template, exposed as `gaussdb.adapters`: changing such mapping allows to
     customise the type mapping for every connections created afterwards.
 
     The object can start empty or copy from another object of the same class.
@@ -57,7 +57,7 @@
     is cheap: a copy is only made on customisation.
     """
 
-    __module__ = "psycopg.adapt"
+    __module__ = "gaussdb.adapt"
 
     types: TypesRegistry
 
@@ -129,7 +129,7 @@ def register_dumper(self, cls: type | str | None, dumper: type[Dumper]) -> None:
         If `!cls` is None, only use the dumper when looking up using
         `get_dumper_by_oid()`, which happens when we know the Postgres type to
         adapt to, but not the Python type that will be adapted (e.g. in COPY
-        after using `~psycopg.Copy.set_types()`).
+        after using `~gaussdb.Copy.set_types()`).
 
         """
         if not (cls is None or isinstance(cls, (str, type))):
@@ -137,7 +137,7 @@ def register_dumper(self, cls: type | str | None, dumper: type[Dumper]) -> None:
             f"dumpers should be registered on classes, got {cls} instead"
         )
 
-        if _psycopg:
+        if _gaussdb:
             dumper = self._get_optimised(dumper)
 
         # Register the dumper both as its format and as auto
@@ -176,7 +176,7 @@ def register_loader(self, oid: int | str, loader: type[Loader]) -> None:
         if not isinstance(oid, int):
             raise TypeError(f"loaders should be registered on oid, got {oid} instead")
 
-        if _psycopg:
+        if _gaussdb:
             loader = self._get_optimised(loader)
 
         fmt = loader.format
@@ -190,10 +190,10 @@ def get_dumper(self, cls: type, format: PyFormat) -> type[Dumper]:
         """
         Return the dumper class for the given type and format.
 
-        Raise `~psycopg.ProgrammingError` if a class is not available.
+        Raise `~gaussdb.ProgrammingError` if a class is not available.
 
         :param cls: The class to adapt.
-        :param format: The format to dump to. If `~psycopg.adapt.PyFormat.AUTO`,
+        :param format: The format to dump to. If `~gaussdb.adapt.PyFormat.AUTO`,
             use the last one of the dumpers registered on `!cls`.
         """
         try:
@@ -229,7 +229,7 @@ def get_dumper_by_oid(self, oid: int, format: pq.Format) -> type[Dumper]:
         """
         Return the dumper class for the given oid and format.
 
-        Raise `~psycopg.ProgrammingError` if a class is not available.
+        Raise `~gaussdb.ProgrammingError` if a class is not available.
 
         :param oid: The oid of the type to dump to.
         :param format: The format to dump to.
@@ -277,12 +277,12 @@ def _get_optimised(self, cls: type[RV]) -> type[RV]:
         except KeyError:
             pass
 
-        # Check if the class comes from psycopg.types and there is a class
-        # with the same name in psycopg_c._psycopg.
-        from psycopg import types
+        # Check if the class comes from gaussdb.types and there is a class
+        # with the same name in gaussdb_c._gaussdb.
+        from gaussdb import types
 
         if cls.__module__.startswith(types.__name__):
-            new = cast("type[RV]", getattr(_psycopg, cls.__name__, None))
+            new = cast("type[RV]", getattr(_gaussdb, cls.__name__, None))
             if new:
                 self._optimised[cls] = new
                 return new
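Because these registration methods are easy to call at the wrong scope, here is a hedged usage sketch (the `!Inches` type, its dumper, and the open connection `!conn` are made up for illustration; `!gaussdb.postgres.types` is the global types registry mentioned elsewhere in this changeset):

.. code:: python

    import gaussdb
    from gaussdb.adapt import Dumper

    class Inches(float):
        """Hypothetical wrapper type: a length expressed in inches."""

    class InchesDumper(Dumper):
        # Send Inches to the server as float8 text, converted to millimetres.
        oid = gaussdb.postgres.types["float8"].oid

        def dump(self, obj: Inches) -> bytes:
            return str(float(obj) * 25.4).encode()

    # Registering on one connection: cursors created from it inherit the
    # mapping, while other connections keep the global default.
    conn.adapters.register_dumper(Inches, InchesDumper)
    conn.execute("SELECT %s", [Inches(1.0)])  # dumped as 25.4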
diff --git a/psycopg/psycopg/_capabilities.py b/gaussdb/gaussdb/_capabilities.py
similarity index 95%
rename from psycopg/psycopg/_capabilities.py
rename to gaussdb/gaussdb/_capabilities.py
index 323141efc..b0013dc83 100644
--- a/psycopg/psycopg/_capabilities.py
+++ b/gaussdb/gaussdb/_capabilities.py
@@ -1,8 +1,8 @@
 """
-psycopg capabilities objects
+gaussdb capabilities objects
 """
 
-# Copyright (C) 2024 The Psycopg Team
+# Copyright (C) 2024 The GaussDB Team
 
 from __future__ import annotations
 
@@ -110,7 +110,7 @@ def _get_unsupported_message(self, feature: str, want_version: int) -> str:
         elif pq.__build_version__ < want_version:
             return (
                 f"the feature '{feature}' is not available:"
-                f" you are using a psycopg[{pq.__impl__}] libpq wrapper built"
+                f" you are using a gaussdb[{pq.__impl__}] libpq wrapper built"
                 f" with libpq version {pq.version_pretty(pq.__build_version__)};"
                 " the feature requires libpq version"
                 f" {pq.version_pretty(want_version)} or newer"
@@ -122,7 +122,7 @@ def _libpq_source(self) -> str:
         """Return a string reporting where the libpq comes from."""
         if pq.__impl__ == "binary":
             version: str = _cmodule.__version__ or "unknown"
-            return f"the psycopg[binary] package version {version}"
+            return f"the gaussdb[binary] package version {version}"
         else:
             return "system libraries"
 
diff --git a/psycopg/psycopg/_cmodule.py b/gaussdb/gaussdb/_cmodule.py
similarity index 50%
rename from psycopg/psycopg/_cmodule.py
rename to gaussdb/gaussdb/_cmodule.py
index bc67f1daf..e460fc1dc 100644
--- a/psycopg/psycopg/_cmodule.py
+++ b/gaussdb/gaussdb/_cmodule.py
@@ -1,9 +1,9 @@
 # mypy: disable-error-code="import-not-found, attr-defined"
 """
-Simplify access to the _psycopg module
+Simplify access to the _gaussdb module
 """
 
-# Copyright (C) 2021 The Psycopg Team
+# Copyright (C) 2021 The GaussDB Team
 
 from __future__ import annotations
 
@@ -12,25 +12,25 @@
 from . import pq
 
 __version__: str | None = None
-_psycopg: ModuleType
+_gaussdb: ModuleType
 
 # Note: "c" must the first attempt so that mypy associates the variable the
 # right module interface. It will not result Optional, but hey.
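# Illustrative note (not part of this module): which implementation is
# imported below follows gaussdb.pq.__impl__, which can be forced through
# the GAUSSDB_IMPL environment variable before the first import, e.g.:
#
#     $ GAUSSDB_IMPL=python python
#     >>> import gaussdb
#     >>> gaussdb.pq.__impl__
#     'python'
#
# If the requested implementation cannot be imported, the import raises
# ImportError rather than silently falling back to another implementation.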
if pq.__impl__ == "c": - import psycopg_c._psycopg + import gaussdb_c._gaussdb - _psycopg = psycopg_c._psycopg - __version__ = psycopg_c.__version__ + _gaussdb = gaussdb_c._gaussdb + __version__ = gaussdb_c.__version__ elif pq.__impl__ == "binary": - import psycopg_binary._psycopg + import gaussdb_binary._gaussdb - _psycopg = psycopg_binary._psycopg - __version__ = psycopg_binary.__version__ + _gaussdb = gaussdb_binary._gaussdb + __version__ = gaussdb_binary.__version__ elif pq.__impl__ == "python": - _psycopg = None # type: ignore[assignment] + _gaussdb = None # type: ignore[assignment] else: - raise ImportError(f"can't find _psycopg optimised module in {pq.__impl__!r}") + raise ImportError(f"can't find _gaussdb optimised module in {pq.__impl__!r}") diff --git a/psycopg/psycopg/_column.py b/gaussdb/gaussdb/_column.py similarity index 97% rename from psycopg/psycopg/_column.py rename to gaussdb/gaussdb/_column.py index 4cc261545..18aca6c03 100644 --- a/psycopg/psycopg/_column.py +++ b/gaussdb/gaussdb/_column.py @@ -2,7 +2,7 @@ The Column object in Cursor.description """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -15,7 +15,7 @@ class Column(Sequence[Any]): - __module__ = "psycopg" + __module__ = "gaussdb" def __init__(self, cursor: BaseCursor[Any, Any], index: int): res = cursor.pgresult diff --git a/psycopg/psycopg/_compat.py b/gaussdb/gaussdb/_compat.py similarity index 93% rename from psycopg/psycopg/_compat.py rename to gaussdb/gaussdb/_compat.py index 5a3e72204..83fab83ca 100644 --- a/psycopg/psycopg/_compat.py +++ b/gaussdb/gaussdb/_compat.py @@ -2,7 +2,7 @@ compatibility functions for different Python versions """ -# Copyright (C) 2021 The Psycopg Team +# Copyright (C) 2021 The GaussDB Team import sys diff --git a/psycopg/psycopg/_connection_base.py b/gaussdb/gaussdb/_connection_base.py similarity index 98% rename from psycopg/psycopg/_connection_base.py rename to gaussdb/gaussdb/_connection_base.py index f2fa7121d..b760b3329 100644 --- a/psycopg/psycopg/_connection_base.py +++ b/gaussdb/gaussdb/_connection_base.py @@ -1,8 +1,8 @@ """ -psycopg connection objects +gaussdb connection objects """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -30,7 +30,7 @@ from ._connection_info import ConnectionInfo if TYPE_CHECKING: - from psycopg_pool.base import BasePool + from gaussdb_pool.base import BasePool from .pq.abc import PGconn, PGresult @@ -53,7 +53,7 @@ _HAS_SEND_CLOSE = capabilities.has_send_close_prepared() -logger = logging.getLogger("psycopg") +logger = logging.getLogger("gaussdb") class Notify(NamedTuple): @@ -69,7 +69,7 @@ class Notify(NamedTuple): """The PID of the backend process which sent the notification.""" -Notify.__module__ = "psycopg" +Notify.__module__ = "gaussdb" NoticeHandler: TypeAlias = Callable[[e.Diagnostic], None] NotifyHandler: TypeAlias = Callable[[Notify], None] @@ -322,7 +322,7 @@ def add_notice_handler(self, callback: NoticeHandler) -> None: Register a callable to be invoked when a notice message is received. :param callback: the callback to call upon message received. - :type callback: Callable[[~psycopg.errors.Diagnostic], None] + :type callback: Callable[[~gaussdb.errors.Diagnostic], None] """ self._notice_handlers.append(callback) @@ -331,7 +331,7 @@ def remove_notice_handler(self, callback: NoticeHandler) -> None: Unregister a notice message callable previously registered. :param callback: the callback to remove. 
- :type callback: Callable[[~psycopg.errors.Diagnostic], None] + :type callback: Callable[[~gaussdb.errors.Diagnostic], None] """ self._notice_handlers.remove(callback) @@ -355,7 +355,7 @@ def add_notify_handler(self, callback: NotifyHandler) -> None: Register a callable to be invoked whenever a notification is received. :param callback: the callback to call upon notification received. - :type callback: Callable[[~psycopg.Notify], None] + :type callback: Callable[[~gaussdb.Notify], None] """ self._notify_handlers.append(callback) @@ -364,7 +364,7 @@ def remove_notify_handler(self, callback: NotifyHandler) -> None: Unregister a notification callable previously registered. :param callback: the callback to remove. - :type callback: Callable[[~psycopg.Notify], None] + :type callback: Callable[[~gaussdb.Notify], None] """ self._notify_handlers.remove(callback) diff --git a/psycopg/psycopg/_connection_info.py b/gaussdb/gaussdb/_connection_info.py similarity index 98% rename from psycopg/psycopg/_connection_info.py rename to gaussdb/gaussdb/_connection_info.py index 0db591310..75986e9a9 100644 --- a/psycopg/psycopg/_connection_info.py +++ b/gaussdb/gaussdb/_connection_info.py @@ -2,7 +2,7 @@ Objects to return information about a PostgreSQL connection. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -17,7 +17,7 @@ class ConnectionInfo: """Allow access to information about the connection.""" - __module__ = "psycopg" + __module__ = "gaussdb" def __init__(self, pgconn: pq.abc.PGconn): self.pgconn = pgconn diff --git a/psycopg/psycopg/_conninfo_attempts.py b/gaussdb/gaussdb/_conninfo_attempts.py similarity index 95% rename from psycopg/psycopg/_conninfo_attempts.py rename to gaussdb/gaussdb/_conninfo_attempts.py index 7bc96dd99..440f822fa 100644 --- a/psycopg/psycopg/_conninfo_attempts.py +++ b/gaussdb/gaussdb/_conninfo_attempts.py @@ -5,7 +5,7 @@ Separate connection attempts from a connection string. """ -# Copyright (C) 2024 The Psycopg Team +# Copyright (C) 2024 The GaussDB Team from __future__ import annotations @@ -17,7 +17,7 @@ from .abc import ConnDict, ConnMapping from ._conninfo_utils import get_param, get_param_def, is_ip_address, split_attempts -logger = logging.getLogger("psycopg") +logger = logging.getLogger("gaussdb") def conninfo_attempts(params: ConnMapping) -> list[ConnDict]: @@ -73,7 +73,7 @@ def _resolve_hostnames(params: ConnDict) -> list[ConnDict]: addresses asynchronously. :param params: The input parameters, for instance as returned by - `~psycopg.conninfo.conninfo_to_dict()`. The function expects at most + `~gaussdb.conninfo.conninfo_to_dict()`. The function expects at most a single entry for host, hostaddr because it is designed to further process the input of split_attempts(). diff --git a/psycopg/psycopg/_conninfo_attempts_async.py b/gaussdb/gaussdb/_conninfo_attempts_async.py similarity index 96% rename from psycopg/psycopg/_conninfo_attempts_async.py rename to gaussdb/gaussdb/_conninfo_attempts_async.py index e50e4f95e..10d02a8e6 100644 --- a/psycopg/psycopg/_conninfo_attempts_async.py +++ b/gaussdb/gaussdb/_conninfo_attempts_async.py @@ -2,7 +2,7 @@ Separate connection attempts from a connection string. 
""" -# Copyright (C) 2024 The Psycopg Team +# Copyright (C) 2024 The GaussDB Team from __future__ import annotations @@ -17,7 +17,7 @@ if True: # ASYNC: import asyncio -logger = logging.getLogger("psycopg") +logger = logging.getLogger("gaussdb") async def conninfo_attempts_async(params: ConnMapping) -> list[ConnDict]: @@ -71,7 +71,7 @@ async def _resolve_hostnames(params: ConnDict) -> list[ConnDict]: addresses asynchronously. :param params: The input parameters, for instance as returned by - `~psycopg.conninfo.conninfo_to_dict()`. The function expects at most + `~gaussdb.conninfo.conninfo_to_dict()`. The function expects at most a single entry for host, hostaddr because it is designed to further process the input of split_attempts(). diff --git a/psycopg/psycopg/_conninfo_utils.py b/gaussdb/gaussdb/_conninfo_utils.py similarity index 98% rename from psycopg/psycopg/_conninfo_utils.py rename to gaussdb/gaussdb/_conninfo_utils.py index 844e71abd..1adaf785d 100644 --- a/psycopg/psycopg/_conninfo_utils.py +++ b/gaussdb/gaussdb/_conninfo_utils.py @@ -2,7 +2,7 @@ Internal utilities to manipulate connection strings """ -# Copyright (C) 2024 The Psycopg Team +# Copyright (C) 2024 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/_copy.py b/gaussdb/gaussdb/_copy.py similarity index 98% rename from psycopg/psycopg/_copy.py rename to gaussdb/gaussdb/_copy.py index 2393b9fac..1f3a5e93c 100644 --- a/psycopg/psycopg/_copy.py +++ b/gaussdb/gaussdb/_copy.py @@ -5,7 +5,7 @@ Objects to support the COPY protocol (sync version). """ -# Copyright (C) 2023 The Psycopg Team +# Copyright (C) 2023 The GaussDB Team from __future__ import annotations @@ -44,10 +44,10 @@ class Copy(BaseCopy["Connection[Any]"]): :sql:`COPY` operation, because the operation result describes the format too. The parameter is useful when a `!Copy` object is created manually and no operation is performed on the cursor, such as when using ``writer=``\\ - `~psycopg.copy.FileWriter`. + `~gaussdb.copy.FileWriter`. """ - __module__ = "psycopg" + __module__ = "gaussdb" writer: Writer @@ -188,7 +188,7 @@ class LibpqWriter(Writer): An `Writer` to write copy data to a Postgres database. """ - __module__ = "psycopg.copy" + __module__ = "gaussdb.copy" def __init__(self, cursor: Cursor[Any]): self.cursor = cursor @@ -239,7 +239,7 @@ class QueuedLibpqWriter(LibpqWriter): on the connection. """ - __module__ = "psycopg.copy" + __module__ = "gaussdb.copy" def __init__(self, cursor: Cursor[Any]): super().__init__(cursor) diff --git a/psycopg/psycopg/_copy_async.py b/gaussdb/gaussdb/_copy_async.py similarity index 98% rename from psycopg/psycopg/_copy_async.py rename to gaussdb/gaussdb/_copy_async.py index 697ef034e..c7f2a55e3 100644 --- a/psycopg/psycopg/_copy_async.py +++ b/gaussdb/gaussdb/_copy_async.py @@ -2,7 +2,7 @@ Objects to support the COPY protocol (async version). """ -# Copyright (C) 2023 The Psycopg Team +# Copyright (C) 2023 The GaussDB Team from __future__ import annotations @@ -41,10 +41,10 @@ class AsyncCopy(BaseCopy["AsyncConnection[Any]"]): :sql:`COPY` operation, because the operation result describes the format too. The parameter is useful when a `!Copy` object is created manually and no operation is performed on the cursor, such as when using ``writer=``\\ - `~psycopg.copy.FileWriter`. + `~gaussdb.copy.FileWriter`. """ - __module__ = "psycopg" + __module__ = "gaussdb" writer: AsyncWriter @@ -187,7 +187,7 @@ class AsyncLibpqWriter(AsyncWriter): An `AsyncWriter` to write copy data to a Postgres database. 
""" - __module__ = "psycopg.copy" + __module__ = "gaussdb.copy" def __init__(self, cursor: AsyncCursor[Any]): self.cursor = cursor @@ -238,7 +238,7 @@ class AsyncQueuedLibpqWriter(AsyncLibpqWriter): on the connection. """ - __module__ = "psycopg.copy" + __module__ = "gaussdb.copy" def __init__(self, cursor: AsyncCursor[Any]): super().__init__(cursor) diff --git a/psycopg/psycopg/_copy_base.py b/gaussdb/gaussdb/_copy_base.py similarity index 97% rename from psycopg/psycopg/_copy_base.py rename to gaussdb/gaussdb/_copy_base.py index 93a636f9e..2888109d0 100644 --- a/psycopg/psycopg/_copy_base.py +++ b/gaussdb/gaussdb/_copy_base.py @@ -1,8 +1,8 @@ """ -psycopg copy support +gaussdb copy support """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -18,7 +18,7 @@ from . import pq from .abc import Buffer, ConnectionType, PQGen, Transformer from .pq.misc import connection_summary -from ._cmodule import _psycopg +from ._cmodule import _gaussdb from .generators import copy_from if TYPE_CHECKING: @@ -433,11 +433,11 @@ def _load_sub(m: re.Match[bytes], __map: dict[bytes, bytes] = _load_repl) -> byt # Override functions with fast versions if available -if _psycopg: - format_row_text = _psycopg.format_row_text - format_row_binary = _psycopg.format_row_binary - parse_row_text = _psycopg.parse_row_text - parse_row_binary = _psycopg.parse_row_binary +if _gaussdb: + format_row_text = _gaussdb.format_row_text + format_row_binary = _gaussdb.format_row_binary + parse_row_text = _gaussdb.parse_row_text + parse_row_binary = _gaussdb.parse_row_binary else: format_row_text = _format_row_text diff --git a/psycopg/psycopg/_cursor_base.py b/gaussdb/gaussdb/_cursor_base.py similarity index 99% rename from psycopg/psycopg/_cursor_base.py rename to gaussdb/gaussdb/_cursor_base.py index 9142e3bc8..b08de7202 100644 --- a/psycopg/psycopg/_cursor_base.py +++ b/gaussdb/gaussdb/_cursor_base.py @@ -1,8 +1,8 @@ """ -Psycopg BaseCursor object +GaussDB BaseCursor object """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/_dns.py b/gaussdb/gaussdb/_dns.py similarity index 97% rename from psycopg/psycopg/_dns.py rename to gaussdb/gaussdb/_dns.py index 363c3c429..1dd0937cd 100644 --- a/psycopg/psycopg/_dns.py +++ b/gaussdb/gaussdb/_dns.py @@ -3,7 +3,7 @@ DNS query support """ -# Copyright (C) 2021 The Psycopg Team +# Copyright (C) 2021 The GaussDB Team from __future__ import annotations @@ -21,7 +21,7 @@ from dns.asyncresolver import Resolver as AsyncResolver except ImportError: raise ImportError( - "the module psycopg._dns requires the package 'dnspython' installed" + "the module gaussdb._dns requires the package 'dnspython' installed" ) from . import conninfo @@ -43,11 +43,11 @@ async def resolve_hostaddr_async(params: dict[str, Any]) -> dict[str, Any]: .. deprecated:: 3.1 The use of this function is not necessary anymore, because - `psycopg.AsyncConnection.connect()` performs non-blocking name + `gaussdb.AsyncConnection.connect()` performs non-blocking name resolution automatically. 
""" warnings.warn( - "from psycopg 3.1, resolve_hostaddr_async() is not needed anymore", + "from gaussdb 3.1, resolve_hostaddr_async() is not needed anymore", DeprecationWarning, ) hosts: list[str] = [] diff --git a/psycopg/psycopg/_encodings.py b/gaussdb/gaussdb/_encodings.py similarity index 97% rename from psycopg/psycopg/_encodings.py rename to gaussdb/gaussdb/_encodings.py index 556322b41..6a16f9fe7 100644 --- a/psycopg/psycopg/_encodings.py +++ b/gaussdb/gaussdb/_encodings.py @@ -2,7 +2,7 @@ Mappings between PostgreSQL and Python encodings. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -81,7 +81,7 @@ def conn_encoding(conn: BaseConnection[Any] | None) -> str: """ - Return the Python encoding name of a psycopg connection. + Return the Python encoding name of a gaussdb connection. Default to utf8 if the connection has no encoding info. """ diff --git a/psycopg/psycopg/_enums.py b/gaussdb/gaussdb/_enums.py similarity index 89% rename from psycopg/psycopg/_enums.py rename to gaussdb/gaussdb/_enums.py index 1975650c6..550f8bfa9 100644 --- a/psycopg/psycopg/_enums.py +++ b/gaussdb/gaussdb/_enums.py @@ -1,11 +1,11 @@ """ -Enum values for psycopg +Enum values for gaussdb These values are defined by us and are not necessarily dependent on libpq-defined enums. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from enum import Enum, IntEnum from selectors import EVENT_READ, EVENT_WRITE @@ -30,11 +30,11 @@ class PyFormat(str, Enum): """ Enum representing the format wanted for a query argument. - The value `AUTO` allows psycopg to choose the best format for a certain + The value `AUTO` allows gaussdb to choose the best format for a certain parameter. """ - __module__ = "psycopg.adapt" + __module__ = "gaussdb.adapt" AUTO = "s" """Automatically chosen (``%s`` placeholder).""" @@ -57,7 +57,7 @@ class IsolationLevel(IntEnum): Enum representing the isolation level for a transaction. """ - __module__ = "psycopg" + __module__ = "gaussdb" READ_UNCOMMITTED = 1 """:sql:`READ UNCOMMITTED` isolation level.""" diff --git a/psycopg/psycopg/_oids.py b/gaussdb/gaussdb/_oids.py similarity index 94% rename from psycopg/psycopg/_oids.py rename to gaussdb/gaussdb/_oids.py index 350338b9e..f90988af3 100644 --- a/psycopg/psycopg/_oids.py +++ b/gaussdb/gaussdb/_oids.py @@ -2,11 +2,11 @@ PostgreSQL known type OIDs This is an internal module. Types are publicly exposed by -`psycopg.postgres.types`. This module is only used to know the OIDs at import +`gaussdb.postgres.types`. This module is only used to know the OIDs at import time and avoid circular import problems. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team # A couple of special cases used a bit everywhere. 
INVALID_OID = 0 diff --git a/psycopg/psycopg/_pipeline.py b/gaussdb/gaussdb/_pipeline.py similarity index 97% rename from psycopg/psycopg/_pipeline.py rename to gaussdb/gaussdb/_pipeline.py index 8a4365531..13a7d6521 100644 --- a/psycopg/psycopg/_pipeline.py +++ b/gaussdb/gaussdb/_pipeline.py @@ -2,7 +2,7 @@ commands pipeline management """ -# Copyright (C) 2021 The Psycopg Team +# Copyright (C) 2021 The GaussDB Team from __future__ import annotations @@ -38,7 +38,7 @@ ACTIVE = pq.TransactionStatus.ACTIVE -logger = logging.getLogger("psycopg") +logger = logging.getLogger("gaussdb") class BasePipeline: @@ -63,7 +63,7 @@ def status(self) -> pq.PipelineStatus: @classmethod def is_supported(cls) -> bool: - """Return `!True` if the psycopg libpq wrapper supports pipeline mode.""" + """Return `!True` if the gaussdb libpq wrapper supports pipeline mode.""" return capabilities.has_pipeline() def _enter_gen(self) -> PQGen[None]: @@ -74,7 +74,7 @@ def _enter_gen(self) -> PQGen[None]: # Nested pipeline case. # Transaction might be ACTIVE when the pipeline uses an "implicit # transaction", typically in autocommit mode. But when entering a - # Psycopg transaction(), we expect the IDLE state. By sync()-ing, + # GaussDB transaction(), we expect the IDLE state. By sync()-ing, # we make sure all previous commands are completed and the # transaction gets back to IDLE. yield from self._sync_gen() @@ -189,7 +189,7 @@ def _enqueue_sync(self) -> None: class Pipeline(BasePipeline): """Handler for connection in pipeline mode.""" - __module__ = "psycopg" + __module__ = "gaussdb" _conn: Connection[Any] def __init__(self, conn: Connection[Any]) -> None: @@ -232,7 +232,7 @@ def __exit__( class AsyncPipeline(BasePipeline): """Handler for async connection in pipeline mode.""" - __module__ = "psycopg" + __module__ = "gaussdb" _conn: AsyncConnection[Any] def __init__(self, conn: AsyncConnection[Any]) -> None: diff --git a/psycopg/psycopg/_preparing.py b/gaussdb/gaussdb/_preparing.py similarity index 99% rename from psycopg/psycopg/_preparing.py rename to gaussdb/gaussdb/_preparing.py index 79a456819..bfffe4cd4 100644 --- a/psycopg/psycopg/_preparing.py +++ b/gaussdb/gaussdb/_preparing.py @@ -2,7 +2,7 @@ Support for prepared statements """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/_py_transformer.py b/gaussdb/gaussdb/_py_transformer.py similarity index 98% rename from psycopg/psycopg/_py_transformer.py rename to gaussdb/gaussdb/_py_transformer.py index 2749f7dd8..11d014df5 100644 --- a/psycopg/psycopg/_py_transformer.py +++ b/gaussdb/gaussdb/_py_transformer.py @@ -3,11 +3,11 @@ Python implementation of the object. Use the `_transformer module to import the right implementation (Python or C). The public place where the object -is exported is `psycopg.adapt` (which we may not use to avoid circular +is exported is `gaussdb.adapt` (which we may not use to avoid circular dependencies problems). 
""" -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -49,7 +49,7 @@ class Transformer(AdaptContext): """ - __module__ = "psycopg.adapt" + __module__ = "gaussdb.adapt" __slots__ = """ types formats diff --git a/psycopg/psycopg/_queries.py b/gaussdb/gaussdb/_queries.py similarity index 99% rename from psycopg/psycopg/_queries.py rename to gaussdb/gaussdb/_queries.py index ff61b2ac6..3e167103c 100644 --- a/psycopg/psycopg/_queries.py +++ b/gaussdb/gaussdb/_queries.py @@ -2,7 +2,7 @@ Utility module to manipulate queries """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -77,7 +77,7 @@ def convert(self, query: Query, vars: Params | None) -> None: # parameters. They are usually generated by ORMs and have poor # cacheablility (e.g. INSERT ... VALUES (...), (...) with varying # numbers of tuples. - # see https://github.com/psycopg/psycopg/discussions/628 + # see https://github.com/gaussdb/gaussdb/discussions/628 if ( len(bquery) <= MAX_CACHED_STATEMENT_LENGTH and len(vars) <= MAX_CACHED_STATEMENT_PARAMS diff --git a/psycopg/psycopg/_struct.py b/gaussdb/gaussdb/_struct.py similarity index 90% rename from psycopg/psycopg/_struct.py rename to gaussdb/gaussdb/_struct.py index 72536d6bd..cbd05de57 100644 --- a/psycopg/psycopg/_struct.py +++ b/gaussdb/gaussdb/_struct.py @@ -2,7 +2,7 @@ Utility functions to deal with binary structs. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -46,9 +46,9 @@ def __call__(self, data: Buffer, start: int | None) -> tuple[int]: ... def pack_float4_bug_304(x: float) -> bytes: raise e.InterfaceError( - "cannot dump Float4: Python affected by bug #304. Note that the psycopg-c" - " and psycopg-binary packages are not affected by this issue." - " See https://github.com/psycopg/psycopg/issues/304" + "cannot dump Float4: Python affected by bug #304. Note that the gaussdb-c" + " and gaussdb-binary packages are not affected by this issue." + " See https://github.com/gaussdb/gaussdb/issues/304" ) diff --git a/psycopg/psycopg/_tpc.py b/gaussdb/gaussdb/_tpc.py similarity index 97% rename from psycopg/psycopg/_tpc.py rename to gaussdb/gaussdb/_tpc.py index e3719010c..7527d183a 100644 --- a/psycopg/psycopg/_tpc.py +++ b/gaussdb/gaussdb/_tpc.py @@ -1,8 +1,8 @@ """ -psycopg two-phase commit support +gaussdb two-phase commit support """ -# Copyright (C) 2021 The Psycopg Team +# Copyright (C) 2021 The GaussDB Team from __future__ import annotations @@ -112,4 +112,4 @@ def _from_record( return replace(xid, prepared=prepared, owner=owner, database=database) -Xid.__module__ = "psycopg" +Xid.__module__ = "gaussdb" diff --git a/psycopg/psycopg/_transformer.py b/gaussdb/gaussdb/_transformer.py similarity index 73% rename from psycopg/psycopg/_transformer.py rename to gaussdb/gaussdb/_transformer.py index d5a0aacdb..f779edd59 100644 --- a/psycopg/psycopg/_transformer.py +++ b/gaussdb/gaussdb/_transformer.py @@ -4,17 +4,17 @@ This module exports the requested implementation to the rest of the package. """ -# Copyright (C) 2023 The Psycopg Team +# Copyright (C) 2023 The GaussDB Team from __future__ import annotations from . import abc -from ._cmodule import _psycopg +from ._cmodule import _gaussdb Transformer: type[abc.Transformer] -if _psycopg: - Transformer = _psycopg.Transformer +if _gaussdb: + Transformer = _gaussdb.Transformer else: from . 
import _py_transformer diff --git a/psycopg/psycopg/_typeinfo.py b/gaussdb/gaussdb/_typeinfo.py similarity index 97% rename from psycopg/psycopg/_typeinfo.py rename to gaussdb/gaussdb/_typeinfo.py index c949fd185..469ab18b0 100644 --- a/psycopg/psycopg/_typeinfo.py +++ b/gaussdb/gaussdb/_typeinfo.py @@ -5,7 +5,7 @@ information to the adapters if needed. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -34,7 +34,7 @@ class TypeInfo: Hold information about a PostgreSQL base type. """ - __module__ = "psycopg.types" + __module__ = "gaussdb.types" def __init__( self, @@ -97,7 +97,7 @@ def _fetch(cls: type[T], conn: Connection[Any], name: str) -> T | None: # the function with the connection in the state we found (either idle # or intrans) try: - from psycopg import Cursor + from gaussdb import Cursor with conn.transaction(), Cursor(conn, row_factory=dict_row) as cur: if conn_encoding(conn) == "ascii": @@ -114,7 +114,7 @@ async def _fetch_async( cls: type[T], conn: AsyncConnection[Any], name: str ) -> T | None: try: - from psycopg import AsyncCursor + from gaussdb import AsyncCursor async with conn.transaction(): async with AsyncCursor(conn, row_factory=dict_row) as cur: @@ -215,7 +215,7 @@ class TypesRegistry: Container for the information about types in a database. """ - __module__ = "psycopg.types" + __module__ = "gaussdb.types" def __init__(self, template: TypesRegistry | None = None): self._registry: dict[RegistryKey, TypeInfo] @@ -318,8 +318,8 @@ def get_by_subtype(self, cls: type[T], subtype: int | str) -> T | None: Return info about a `TypeInfo` subclass by its element name or oid. :param cls: the subtype of `!TypeInfo` to look for. Currently - supported are `~psycopg.types.range.RangeInfo` and - `~psycopg.types.multirange.MultirangeInfo`. + supported are `~gaussdb.types.range.RangeInfo` and + `~gaussdb.types.multirange.MultirangeInfo`. :param subtype: The name or OID of the subtype of the element to look for. :return: The `!TypeInfo` object of class `!cls` whose subtype is `!subtype`. `!None` if the element or its range are not found. diff --git a/psycopg/psycopg/_typemod.py b/gaussdb/gaussdb/_typemod.py similarity index 98% rename from psycopg/psycopg/_typemod.py rename to gaussdb/gaussdb/_typemod.py index a558b0847..3b45c4126 100644 --- a/psycopg/psycopg/_typemod.py +++ b/gaussdb/gaussdb/_typemod.py @@ -5,7 +5,7 @@ of a column - the numeric part of varchar(10) or decimal(6,2). """ -# Copyright (C) 2024 The Psycopg Team +# Copyright (C) 2024 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/_tz.py b/gaussdb/gaussdb/_tz.py similarity index 93% rename from psycopg/psycopg/_tz.py rename to gaussdb/gaussdb/_tz.py index a3dfbac54..d046e6a9e 100644 --- a/psycopg/psycopg/_tz.py +++ b/gaussdb/gaussdb/_tz.py @@ -2,7 +2,7 @@ Timezone utility functions. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -12,7 +12,7 @@ from .pq.abc import PGconn -logger = logging.getLogger("psycopg") +logger = logging.getLogger("gaussdb") _timezones: dict[bytes | None, tzinfo] = { None: timezone.utc, diff --git a/psycopg/psycopg/_wrappers.py b/gaussdb/gaussdb/_wrappers.py similarity index 95% rename from psycopg/psycopg/_wrappers.py rename to gaussdb/gaussdb/_wrappers.py index f86174171..1d48d5286 100644 --- a/psycopg/psycopg/_wrappers.py +++ b/gaussdb/gaussdb/_wrappers.py @@ -2,13 +2,13 @@ Wrappers for numeric types. 
""" -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team # Wrappers to force numbers to be cast as specific PostgreSQL types -# These types are implemented here but exposed by `psycopg.types.numeric`. +# These types are implemented here but exposed by `gaussdb.types.numeric`. # They are defined here to avoid a circular import. -_MODULE = "psycopg.types.numeric" +_MODULE = "gaussdb.types.numeric" class Int2(int): diff --git a/psycopg/psycopg/abc.py b/gaussdb/gaussdb/abc.py similarity index 95% rename from psycopg/psycopg/abc.py rename to gaussdb/gaussdb/abc.py index d63e97141..83f8862a9 100644 --- a/psycopg/psycopg/abc.py +++ b/gaussdb/gaussdb/abc.py @@ -2,7 +2,7 @@ Protocol objects representing different implementations of the same classes. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -71,8 +71,8 @@ class AdaptContext(Protocol): """ A context describing how types are adapted. - Example of `~AdaptContext` are `~psycopg.Connection`, `~psycopg.Cursor`, - `~psycopg.adapt.Transformer`, `~psycopg.adapt.AdaptersMap`. + Example of `~AdaptContext` are `~gaussdb.Connection`, `~gaussdb.Cursor`, + `~gaussdb.adapt.Transformer`, `~gaussdb.adapt.AdaptersMap`. Note that this is a `~typing.Protocol`, so objects implementing `!AdaptContext` don't need to explicitly inherit from this class. @@ -88,7 +88,7 @@ def adapters(self) -> AdaptersMap: def connection(self) -> BaseConnection[Any] | None: """The connection used by this object, if available. - :rtype: `~psycopg.Connection` or `~psycopg.AsyncConnection` or `!None` + :rtype: `~gaussdb.Connection` or `~gaussdb.AsyncConnection` or `!None` """ ... @@ -101,7 +101,7 @@ class Dumper(Protocol): format: pq.Format """ The format that this class `dump()` method produces, - `~psycopg.pq.Format.TEXT` or `~psycopg.pq.Format.BINARY`. + `~gaussdb.pq.Format.TEXT` or `~gaussdb.pq.Format.BINARY`. This is a class attribute. """ @@ -184,7 +184,7 @@ class Loader(Protocol): format: pq.Format """ The format that this class `load()` method can convert, - `~psycopg.pq.Format.TEXT` or `~psycopg.pq.Format.BINARY`. + `~gaussdb.pq.Format.TEXT` or `~gaussdb.pq.Format.BINARY`. This is a class attribute. """ diff --git a/psycopg/psycopg/adapt.py b/gaussdb/gaussdb/adapt.py similarity index 93% rename from psycopg/psycopg/adapt.py rename to gaussdb/gaussdb/adapt.py index 2918f555b..7ae4e6ce9 100644 --- a/psycopg/psycopg/adapt.py +++ b/gaussdb/gaussdb/adapt.py @@ -2,7 +2,7 @@ Entry point into the adaptation system. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -66,7 +66,7 @@ def quote(self, obj: Any) -> Buffer: return esc.escape_literal(value) # This path is taken when quote is asked without a connection, - # usually it means by psycopg.sql.quote() or by + # usually it means by gaussdb.sql.quote() or by # 'Composible.as_string(None)'. Most often than not this is done by # someone generating a SQL file to consume elsewhere. @@ -96,8 +96,8 @@ def quote(self, obj: Any) -> Buffer: def get_key(self, obj: Any, format: PyFormat) -> abc.DumperKey: """ - Implementation of the `~psycopg.abc.Dumper.get_key()` member of the - `~psycopg.abc.Dumper` protocol. Look at its definition for details. + Implementation of the `~gaussdb.abc.Dumper.get_key()` member of the + `~gaussdb.abc.Dumper` protocol. Look at its definition for details. This implementation returns the `!cls` passed in the constructor. 
Subclasses needing to specialise the PostgreSQL type according to the @@ -109,8 +109,8 @@ def get_key(self, obj: Any, format: PyFormat) -> abc.DumperKey: def upgrade(self, obj: Any, format: PyFormat) -> Dumper: """ - Implementation of the `~psycopg.abc.Dumper.upgrade()` member of the - `~psycopg.abc.Dumper` protocol. Look at its definition for details. + Implementation of the `~gaussdb.abc.Dumper.upgrade()` member of the + `~gaussdb.abc.Dumper` protocol. Look at its definition for details. This implementation just returns `!self`. If a subclass implements `get_key()` it should probably override `!upgrade()` too. diff --git a/psycopg/psycopg/client_cursor.py b/gaussdb/gaussdb/client_cursor.py similarity index 95% rename from psycopg/psycopg/client_cursor.py rename to gaussdb/gaussdb/client_cursor.py index 5088b16b2..3265e4849 100644 --- a/psycopg/psycopg/client_cursor.py +++ b/gaussdb/gaussdb/client_cursor.py @@ -1,8 +1,8 @@ """ -psycopg client-side binding cursors +gaussdb client-side binding cursors """ -# Copyright (C) 2022 The Psycopg Team +# Copyright (C) 2022 The GaussDB Team from __future__ import annotations @@ -84,10 +84,10 @@ def _get_prepared( class ClientCursor(ClientCursorMixin["Connection[Any]", Row], Cursor[Row]): - __module__ = "psycopg" + __module__ = "gaussdb" class AsyncClientCursor( ClientCursorMixin["AsyncConnection[Any]", Row], AsyncCursor[Row] ): - __module__ = "psycopg" + __module__ = "gaussdb" diff --git a/psycopg/psycopg/connection.py b/gaussdb/gaussdb/connection.py similarity index 99% rename from psycopg/psycopg/connection.py rename to gaussdb/gaussdb/connection.py index d7035e276..ef342cc77 100644 --- a/psycopg/psycopg/connection.py +++ b/gaussdb/gaussdb/connection.py @@ -2,10 +2,10 @@ # from the original file 'connection_async.py' # DO NOT CHANGE! Change the original file instead. """ -Psycopg connection object (sync version) +GaussDB connection object (sync version) """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -49,7 +49,7 @@ _INTERRUPTED = KeyboardInterrupt -logger = logging.getLogger("psycopg") +logger = logging.getLogger("gaussdb") class Connection(BaseConnection[Row]): @@ -57,7 +57,7 @@ class Connection(BaseConnection[Row]): Wrapper for a connection to the database. """ - __module__ = "psycopg" + __module__ = "gaussdb" cursor_factory: type[Cursor[Row]] server_cursor_factory: type[ServerCursor[Row]] diff --git a/psycopg/psycopg/connection_async.py b/gaussdb/gaussdb/connection_async.py similarity index 98% rename from psycopg/psycopg/connection_async.py rename to gaussdb/gaussdb/connection_async.py index 94a8bf3c1..380bc6f0c 100644 --- a/psycopg/psycopg/connection_async.py +++ b/gaussdb/gaussdb/connection_async.py @@ -1,8 +1,8 @@ """ -Psycopg connection object (async version) +GaussDB connection object (async version) """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -55,7 +55,7 @@ else: _INTERRUPTED = KeyboardInterrupt -logger = logging.getLogger("psycopg") +logger = logging.getLogger("gaussdb") class AsyncConnection(BaseConnection[Row]): @@ -63,7 +63,7 @@ class AsyncConnection(BaseConnection[Row]): Wrapper for a connection to the database. 
""" - __module__ = "psycopg" + __module__ = "gaussdb" cursor_factory: type[AsyncCursor[Row]] server_cursor_factory: type[AsyncServerCursor[Row]] @@ -101,7 +101,7 @@ async def connect( loop = asyncio.get_running_loop() if isinstance(loop, asyncio.ProactorEventLoop): raise e.InterfaceError( - "Psycopg cannot use the 'ProactorEventLoop' to run in async" + "GaussDB cannot use the 'ProactorEventLoop' to run in async" " mode. Please use a compatible event loop, for instance by" " setting 'asyncio.set_event_loop_policy" "(WindowsSelectorEventLoopPolicy())'" diff --git a/psycopg/psycopg/conninfo.py b/gaussdb/gaussdb/conninfo.py similarity index 96% rename from psycopg/psycopg/conninfo.py rename to gaussdb/gaussdb/conninfo.py index e8c33876b..22d2c8f87 100644 --- a/psycopg/psycopg/conninfo.py +++ b/gaussdb/gaussdb/conninfo.py @@ -2,7 +2,7 @@ Functions to manipulate conninfo strings """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -32,7 +32,7 @@ def make_conninfo(conninfo: str = "", **kwargs: ConnParam) -> str: :return: A connection string valid for PostgreSQL, with the `!kwargs` parameters merged. - Raise `~psycopg.ProgrammingError` if the input doesn't make a valid + Raise `~gaussdb.ProgrammingError` if the input doesn't make a valid conninfo string. .. __: https://www.postgresql.org/docs/current/libpq-connect.html @@ -73,7 +73,7 @@ def conninfo_to_dict(conninfo: str = "", **kwargs: ConnParam) -> ConnDict: :return: Dictionary with the parameters parsed from `!conninfo` and `!kwargs`. - Raise `~psycopg.ProgrammingError` if `!conninfo` is not a a valid connection + Raise `~gaussdb.ProgrammingError` if `!conninfo` is not a a valid connection string. .. __: https://www.postgresql.org/docs/current/libpq-connect.html diff --git a/psycopg/psycopg/copy.py b/gaussdb/gaussdb/copy.py similarity index 100% rename from psycopg/psycopg/copy.py rename to gaussdb/gaussdb/copy.py diff --git a/psycopg/psycopg/crdb/__init__.py b/gaussdb/gaussdb/crdb/__init__.py similarity index 91% rename from psycopg/psycopg/crdb/__init__.py rename to gaussdb/gaussdb/crdb/__init__.py index dbdcf5f34..816322b65 100644 --- a/psycopg/psycopg/crdb/__init__.py +++ b/gaussdb/gaussdb/crdb/__init__.py @@ -2,7 +2,7 @@ CockroachDB support package. """ -# Copyright (C) 2022 The Psycopg Team +# Copyright (C) 2022 The GaussDB Team from . import _types from .connection import AsyncCrdbConnection, CrdbConnection, CrdbConnectionInfo diff --git a/psycopg/psycopg/crdb/_types.py b/gaussdb/gaussdb/crdb/_types.py similarity index 99% rename from psycopg/psycopg/crdb/_types.py rename to gaussdb/gaussdb/crdb/_types.py index fcf2a47ef..83dbbcafa 100644 --- a/psycopg/psycopg/crdb/_types.py +++ b/gaussdb/gaussdb/crdb/_types.py @@ -2,7 +2,7 @@ Types configuration specific for CockroachDB. """ -# Copyright (C) 2022 The Psycopg Team +# Copyright (C) 2022 The GaussDB Team from enum import Enum diff --git a/psycopg/psycopg/crdb/connection.py b/gaussdb/gaussdb/crdb/connection.py similarity index 92% rename from psycopg/psycopg/crdb/connection.py rename to gaussdb/gaussdb/crdb/connection.py index 60db9a876..47ef5ba27 100644 --- a/psycopg/psycopg/crdb/connection.py +++ b/gaussdb/gaussdb/crdb/connection.py @@ -2,7 +2,7 @@ CockroachDB-specific connections. 
""" -# Copyright (C) 2022 The Psycopg Team +# Copyright (C) 2022 The GaussDB Team from __future__ import annotations @@ -57,7 +57,7 @@ class CrdbConnection(_CrdbConnectionMixin, Connection[Row]): Wrapper for a connection to a CockroachDB database. """ - __module__ = "psycopg.crdb" + __module__ = "gaussdb.crdb" class AsyncCrdbConnection(_CrdbConnectionMixin, AsyncConnection[Row]): @@ -65,15 +65,15 @@ class AsyncCrdbConnection(_CrdbConnectionMixin, AsyncConnection[Row]): Wrapper for an async connection to a CockroachDB database. """ - __module__ = "psycopg.crdb" + __module__ = "gaussdb.crdb" class CrdbConnectionInfo(ConnectionInfo): """ - `~psycopg.ConnectionInfo` subclass to get info about a CockroachDB database. + `~gaussdb.ConnectionInfo` subclass to get info about a CockroachDB database. """ - __module__ = "psycopg.crdb" + __module__ = "gaussdb.crdb" @property def vendor(self) -> str: diff --git a/psycopg/psycopg/cursor.py b/gaussdb/gaussdb/cursor.py similarity index 99% rename from psycopg/psycopg/cursor.py rename to gaussdb/gaussdb/cursor.py index cd3e96f9f..6413d57ac 100644 --- a/psycopg/psycopg/cursor.py +++ b/gaussdb/gaussdb/cursor.py @@ -2,10 +2,10 @@ # from the original file 'cursor_async.py' # DO NOT CHANGE! Change the original file instead. """ -Psycopg Cursor object. +GaussDB Cursor object. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -30,7 +30,7 @@ class Cursor(BaseCursor["Connection[Any]", Row]): - __module__ = "psycopg" + __module__ = "gaussdb" __slots__ = () @overload diff --git a/psycopg/psycopg/cursor_async.py b/gaussdb/gaussdb/cursor_async.py similarity index 99% rename from psycopg/psycopg/cursor_async.py rename to gaussdb/gaussdb/cursor_async.py index ce1e50a64..7dfdf93b4 100644 --- a/psycopg/psycopg/cursor_async.py +++ b/gaussdb/gaussdb/cursor_async.py @@ -1,8 +1,8 @@ """ -Psycopg AsyncCursor object. +GaussDB AsyncCursor object. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -27,7 +27,7 @@ class AsyncCursor(BaseCursor["AsyncConnection[Any]", Row]): - __module__ = "psycopg" + __module__ = "gaussdb" __slots__ = () @overload diff --git a/psycopg/psycopg/dbapi20.py b/gaussdb/gaussdb/dbapi20.py similarity index 97% rename from psycopg/psycopg/dbapi20.py rename to gaussdb/gaussdb/dbapi20.py index d4da65433..7db04880b 100644 --- a/psycopg/psycopg/dbapi20.py +++ b/gaussdb/gaussdb/dbapi20.py @@ -2,7 +2,7 @@ Compatibility objects with DBAPI 2.0 """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -23,7 +23,7 @@ def __init__(self, name: str, oids: Sequence[int]): self.values = tuple(oids) def __repr__(self) -> str: - return f"psycopg.{self.name}" + return f"gaussdb.{self.name}" def __eq__(self, other: Any) -> bool: if isinstance(other, int): diff --git a/psycopg/psycopg/errors.py b/gaussdb/gaussdb/errors.py similarity index 98% rename from psycopg/psycopg/errors.py rename to gaussdb/gaussdb/errors.py index 2b92ac659..ac750c0ec 100644 --- a/psycopg/psycopg/errors.py +++ b/gaussdb/gaussdb/errors.py @@ -1,5 +1,5 @@ """ -psycopg exceptions +gaussdb exceptions DBAPI-defined Exceptions are defined in the following hierarchy:: @@ -16,7 +16,7 @@ |__NotSupportedError """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -41,8 +41,8 @@ class FinishedPGconn: """Finished libpq connection. 
- Attributes are set from a real `~psycopg.pq.PGconn` but any operations will - raise an `~psycopg.OperationalError`. + Attributes are set from a real `~gaussdb.pq.PGconn` but any operations will + raise an `~gaussdb.OperationalError`. """ info: list[ConninfoOption] = field(default_factory=list) @@ -244,15 +244,15 @@ class Warning(Exception): """ Exception raised for important warnings. - Defined for DBAPI compatibility, but never raised by ``psycopg``. + Defined for DBAPI compatibility, but never raised by ``gaussdb``. """ - __module__ = "psycopg" + __module__ = "gaussdb" class Error(Exception): """ - Base exception for all the errors psycopg will raise. + Base exception for all the errors gaussdb will raise. Exception that is the base class of all other error exceptions. You can use this to catch all errors with one single `!except` statement. @@ -260,7 +260,7 @@ class Error(Exception): This exception is guaranteed to be picklable. """ - __module__ = "psycopg" + __module__ = "gaussdb" sqlstate: str | None = None @@ -284,7 +284,7 @@ def __init__( def pgconn(self) -> PGconn | None: """The connection object, if the error was raised from a connection attempt. - :rtype: psycopg.pq.PGconn | None + :rtype: gaussdb.pq.PGconn | None """ return self._pgconn if self._pgconn else None @@ -292,7 +292,7 @@ def pgconn(self) -> PGconn | None: def pgresult(self) -> PGresult | None: """The result object, if the exception was raised after a failed query. - :rtype: psycopg.pq.PGresult | None + :rtype: gaussdb.pq.PGresult | None """ return self._info if _is_pgresult(self._info) else None @@ -318,7 +318,7 @@ class InterfaceError(Error): An error related to the database interface rather than the database itself. """ - __module__ = "psycopg" + __module__ = "gaussdb" class DatabaseError(Error): @@ -326,7 +326,7 @@ class DatabaseError(Error): Exception raised for errors that are related to the database. """ - __module__ = "psycopg" + __module__ = "gaussdb" def __init_subclass__(cls, code: str | None = None, name: str | None = None): if code: @@ -343,7 +343,7 @@ class DataError(DatabaseError): Examples may be division by zero, numeric value out of range, etc. """ - __module__ = "psycopg" + __module__ = "gaussdb" class OperationalError(DatabaseError): @@ -356,7 +356,7 @@ class OperationalError(DatabaseError): during processing, etc. """ - __module__ = "psycopg" + __module__ = "gaussdb" class IntegrityError(DatabaseError): @@ -366,7 +366,7 @@ class IntegrityError(DatabaseError): An example may be a foreign key check failed. """ - __module__ = "psycopg" + __module__ = "gaussdb" class InternalError(DatabaseError): @@ -377,7 +377,7 @@ class InternalError(DatabaseError): of sync, etc. """ - __module__ = "psycopg" + __module__ = "gaussdb" class ProgrammingError(DatabaseError): @@ -388,7 +388,7 @@ class ProgrammingError(DatabaseError): statement, wrong number of parameters specified, etc. """ - __module__ = "psycopg" + __module__ = "gaussdb" class NotSupportedError(DatabaseError): @@ -396,26 +396,26 @@ class NotSupportedError(DatabaseError): A method or database API was used which is not supported by the database. """ - __module__ = "psycopg" + __module__ = "gaussdb" class ConnectionTimeout(OperationalError): """ - Exception raised on timeout of the `~psycopg.Connection.connect()` method. + Exception raised on timeout of the `~gaussdb.Connection.connect()` method. The error is raised if the ``connect_timeout`` is specified and a connection is not obtained in useful time. - Subclass of `~psycopg.OperationalError`. 
+ Subclass of `~gaussdb.OperationalError`. """ class CancellationTimeout(OperationalError): """ Exception raised on timeout of connection's - `~psycopg.Connection.cancel_safe()` method. + `~gaussdb.Connection.cancel_safe()` method. - Subclass of `~psycopg.OperationalError`. + Subclass of `~gaussdb.OperationalError`. """ @@ -423,7 +423,7 @@ class PipelineAborted(OperationalError): """ Raised when an operation fails because the current pipeline is in an aborted state. - Subclass of `~psycopg.OperationalError`. + Subclass of `~gaussdb.OperationalError`. """ diff --git a/psycopg/psycopg/generators.py b/gaussdb/gaussdb/generators.py similarity index 97% rename from psycopg/psycopg/generators.py rename to gaussdb/gaussdb/generators.py index 9ff7228d6..b9f0bb720 100644 --- a/psycopg/psycopg/generators.py +++ b/gaussdb/gaussdb/generators.py @@ -18,7 +18,7 @@ generator should probably yield the same value again in order to wait more. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -31,7 +31,7 @@ from .abc import Buffer, PipelineCommand, PQGen, PQGenConn from .pq.abc import PGcancelConn, PGconn, PGresult from .waiting import Ready, Wait -from ._cmodule import _psycopg +from ._cmodule import _gaussdb from ._encodings import conninfo_encoding OK = pq.ConnStatus.OK @@ -403,14 +403,14 @@ def copy_end(pgconn: PGconn, error: bytes | None) -> PQGen[PGresult]: # Override functions with fast versions if available -if _psycopg: - connect = _psycopg.connect - cancel = _psycopg.cancel - execute = _psycopg.execute - send = _psycopg.send - fetch_many = _psycopg.fetch_many - fetch = _psycopg.fetch - pipeline_communicate = _psycopg.pipeline_communicate +if _gaussdb: + connect = _gaussdb.connect + cancel = _gaussdb.cancel + execute = _gaussdb.execute + send = _gaussdb.send + fetch_many = _gaussdb.fetch_many + fetch = _gaussdb.fetch + pipeline_communicate = _gaussdb.pipeline_communicate else: connect = _connect diff --git a/psycopg/psycopg/postgres.py b/gaussdb/gaussdb/postgres.py similarity index 99% rename from psycopg/psycopg/postgres.py rename to gaussdb/gaussdb/postgres.py index 362012027..d6373205a 100644 --- a/psycopg/psycopg/postgres.py +++ b/gaussdb/gaussdb/postgres.py @@ -2,7 +2,7 @@ Types configuration specific to PostgreSQL. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from .abc import AdaptContext from ._typemod import BitTypeModifier, CharTypeModifier, NumericTypeModifier diff --git a/psycopg/psycopg/pq/__init__.py b/gaussdb/gaussdb/pq/__init__.py similarity index 85% rename from psycopg/psycopg/pq/__init__.py rename to gaussdb/gaussdb/pq/__init__.py index 4f242980d..5a67e4d4d 100644 --- a/psycopg/psycopg/pq/__init__.py +++ b/gaussdb/gaussdb/pq/__init__.py @@ -1,5 +1,5 @@ """ -psycopg libpq wrapper +gaussdb libpq wrapper This package exposes the libpq functionalities as Python objects and functions. @@ -7,7 +7,7 @@ implementation-dependent but all the implementations share the same interface. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -23,7 +23,7 @@ logger = logging.getLogger(__name__) __impl__: str -"""The currently loaded implementation of the `!psycopg.pq` package. +"""The currently loaded implementation of the `!gaussdb.pq` package. Possible values include ``python``, ``c``, ``binary``. """ @@ -31,7 +31,7 @@ __build_version__: int """The libpq version the C package was built with.
-A number in the same format of `~psycopg.ConnectionInfo.server_version` +A number in the same format of `~gaussdb.ConnectionInfo.server_version` representing the libpq used to build the speedup module (``c``, ``binary``) if available. @@ -58,29 +58,29 @@ def import_from_libpq() -> None: global __impl__, version, __build_version__ global PGconn, PGresult, Conninfo, Escaping, PGcancel, PGcancelConn - impl = os.environ.get("PSYCOPG_IMPL", "").lower() + impl = os.environ.get("GAUSSDB_IMPL", "").lower() module = None attempts: list[str] = [] def handle_error(name: str, e: Exception) -> None: if not impl: - msg = f"couldn't import psycopg '{name}' implementation: {e}" + msg = f"couldn't import gaussdb '{name}' implementation: {e}" attempts.append(msg) else: - msg = f"couldn't import requested psycopg '{name}' implementation: {e}" + msg = f"couldn't import requested gaussdb '{name}' implementation: {e}" raise ImportError(msg) from e # The best implementation: fast but requires the system libpq installed if not impl or impl == "c": try: - from psycopg_c import pq as module # type: ignore + from gaussdb_c import pq as module # type: ignore except Exception as e: handle_error("c", e) # Second best implementation: fast and stand-alone if not module and (not impl or impl == "binary"): try: - from psycopg_binary import pq as module # type: ignore + from gaussdb_binary import pq as module # type: ignore except Exception as e: handle_error("binary", e) @@ -102,7 +102,7 @@ def handle_error(name: str, e: Exception) -> None: PGcancelConn = module.PGcancelConn __build_version__ = module.__build_version__ elif impl: - raise ImportError(f"requested psycopg implementation '{impl}' unknown") + raise ImportError(f"requested gaussdb implementation '{impl}' unknown") else: sattempts = "\n".join(f"- {attempt}" for attempt in attempts) raise ImportError( diff --git a/psycopg/psycopg/pq/_debug.py b/gaussdb/gaussdb/pq/_debug.py similarity index 91% rename from psycopg/psycopg/pq/_debug.py rename to gaussdb/gaussdb/pq/_debug.py index d55be281d..862c31bd9 100644 --- a/psycopg/psycopg/pq/_debug.py +++ b/gaussdb/gaussdb/pq/_debug.py @@ -7,18 +7,18 @@ Suggested usage:: import logging - import psycopg - from psycopg import pq - from psycopg.pq._debug import PGconnDebug + import gaussdb + from gaussdb import pq + from gaussdb.pq._debug import PGconnDebug logging.basicConfig(level=logging.INFO, format="%(message)s") - logger = logging.getLogger("psycopg.debug") + logger = logging.getLogger("gaussdb.debug") logger.setLevel(logging.INFO) assert pq.__impl__ == "python" pq.PGconn = PGconnDebug - with psycopg.connect("") as conn: + with gaussdb.connect("") as conn: conn.pgconn.trace(2) conn.pgconn.set_trace_flags( pq.Trace.SUPPRESS_TIMESTAMPS | pq.Trace.REGRESS_MODE) @@ -26,7 +26,7 @@ """ -# Copyright (C) 2022 The Psycopg Team +# Copyright (C) 2022 The GaussDB Team import inspect import logging @@ -39,7 +39,7 @@ Func = TypeVar("Func", bound=Callable[..., Any]) -logger = logging.getLogger("psycopg.debug") +logger = logging.getLogger("gaussdb.debug") class PGconnDebug: diff --git a/psycopg/psycopg/pq/_enums.py b/gaussdb/gaussdb/pq/_enums.py similarity index 91% rename from psycopg/psycopg/pq/_enums.py rename to gaussdb/gaussdb/pq/_enums.py index 5d0cdf902..961d0cce5 100644 --- a/psycopg/psycopg/pq/_enums.py +++ b/gaussdb/gaussdb/pq/_enums.py @@ -1,8 +1,8 @@ """ -libpq enum definitions for psycopg +libpq enum definitions for gaussdb """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from enum import 
IntEnum, IntFlag, auto @@ -14,7 +14,7 @@ class ConnStatus(IntEnum): Current status of the connection. """ - __module__ = "psycopg.pq" + __module__ = "gaussdb.pq" OK = 0 """The connection is in a working state.""" @@ -44,7 +44,7 @@ class PollingStatus(IntEnum): If ``READING`` or ``WRITING`` you may select before polling again. """ - __module__ = "psycopg.pq" + __module__ = "gaussdb.pq" FAILED = 0 """Connection attempt failed.""" @@ -63,7 +63,7 @@ class ExecStatus(IntEnum): The status of a command. """ - __module__ = "psycopg.pq" + __module__ = "gaussdb.pq" EMPTY_QUERY = 0 """The string sent to the server was empty.""" @@ -135,7 +135,7 @@ class TransactionStatus(IntEnum): The transaction status of a connection. """ - __module__ = "psycopg.pq" + __module__ = "gaussdb.pq" IDLE = 0 """Connection ready, no transaction active.""" @@ -156,7 +156,7 @@ class TransactionStatus(IntEnum): class Ping(IntEnum): """Response from a ping attempt.""" - __module__ = "psycopg.pq" + __module__ = "gaussdb.pq" OK = 0 """ @@ -182,7 +182,7 @@ class Ping(IntEnum): class PipelineStatus(IntEnum): """Pipeline mode status of the libpq connection.""" - __module__ = "psycopg.pq" + __module__ = "gaussdb.pq" OFF = 0 """ @@ -205,7 +205,7 @@ class DiagnosticField(IntEnum): Fields in an error report. """ - __module__ = "psycopg.pq" + __module__ = "gaussdb.pq" # from src/include/postgres_ext.h @@ -233,11 +233,11 @@ class Format(IntEnum): """ Enum representing the format of a query argument or return value. - These values are only the ones managed by the libpq. `~psycopg` may also - support automatically-chosen values: see `psycopg.adapt.PyFormat`. + These values are only the ones managed by the libpq. `~gaussdb` may also + support automatically-chosen values: see `gaussdb.adapt.PyFormat`. """ - __module__ = "psycopg.pq" + __module__ = "gaussdb.pq" TEXT = 0 """Text parameter.""" @@ -250,7 +250,7 @@ class Trace(IntFlag): Enum to control tracing of the client/server communication. """ - __module__ = "psycopg.pq" + __module__ = "gaussdb.pq" SUPPRESS_TIMESTAMPS = 1 """Do not include timestamps in messages.""" diff --git a/psycopg/psycopg/pq/_pq_ctypes.py b/gaussdb/gaussdb/pq/_pq_ctypes.py similarity index 99% rename from psycopg/psycopg/pq/_pq_ctypes.py rename to gaussdb/gaussdb/pq/_pq_ctypes.py index c699eab3c..d6f3e7156 100644 --- a/psycopg/psycopg/pq/_pq_ctypes.py +++ b/gaussdb/gaussdb/pq/_pq_ctypes.py @@ -2,7 +2,7 @@ libpq access using ctypes """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/pq/_pq_ctypes.pyi b/gaussdb/gaussdb/pq/_pq_ctypes.pyi similarity index 98% rename from psycopg/psycopg/pq/_pq_ctypes.pyi rename to gaussdb/gaussdb/pq/_pq_ctypes.pyi index 891c88a40..48544d9f3 100644 --- a/psycopg/psycopg/pq/_pq_ctypes.pyi +++ b/gaussdb/gaussdb/pq/_pq_ctypes.pyi @@ -2,7 +2,7 @@ types stub for ctypes functions """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from ctypes import Array, _Pointer, c_char, c_char_p, c_int, c_ubyte, c_uint, c_ulong from ctypes import pointer @@ -105,7 +105,7 @@ def PQsetNoticeReceiver( ) -> Callable[[Any], PGresult_struct]: ... 
# TODO: Ignoring type as getting an error on mypy/ctypes: -# Type argument "psycopg.pq._pq_ctypes.PGnotify_struct" of "pointer" must be +# Type argument "gaussdb.pq._pq_ctypes.PGnotify_struct" of "pointer" must be # a subtype of "ctypes._CData" def PQnotifies( arg1: PGconn_struct | None, @@ -138,7 +138,7 @@ def PQsendFlushRequest(pgconn: PGconn_struct | None) -> int: ... # Autogenerated section. # In order to refresh, run: -# python -m psycopg.pq._pq_ctypes +# python -m gaussdb.pq._pq_ctypes # fmt: off # autogenerated: start diff --git a/psycopg/psycopg/pq/abc.py b/gaussdb/gaussdb/pq/abc.py similarity index 99% rename from psycopg/psycopg/pq/abc.py rename to gaussdb/gaussdb/pq/abc.py index 105c47f8b..e6a3aecc6 100644 --- a/psycopg/psycopg/pq/abc.py +++ b/gaussdb/gaussdb/pq/abc.py @@ -2,7 +2,7 @@ Protocol objects to represent objects exposed by different pq implementations. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/pq/misc.py b/gaussdb/gaussdb/pq/misc.py similarity index 98% rename from psycopg/psycopg/pq/misc.py rename to gaussdb/gaussdb/pq/misc.py index 92352a40d..053d3764e 100644 --- a/psycopg/psycopg/pq/misc.py +++ b/gaussdb/gaussdb/pq/misc.py @@ -2,7 +2,7 @@ Various functionalities to make it easier to work with the libpq. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -18,7 +18,7 @@ from . import abc from ._enums import ConnStatus, PipelineStatus, TransactionStatus -logger = logging.getLogger("psycopg.pq") +logger = logging.getLogger("gaussdb.pq") OK = ConnStatus.OK diff --git a/psycopg/psycopg/pq/pq_ctypes.py b/gaussdb/gaussdb/pq/pq_ctypes.py similarity index 99% rename from psycopg/psycopg/pq/pq_ctypes.py rename to gaussdb/gaussdb/pq/pq_ctypes.py index 3c70281cc..1e4046386 100644 --- a/psycopg/psycopg/pq/pq_ctypes.py +++ b/gaussdb/gaussdb/pq/pq_ctypes.py @@ -8,7 +8,7 @@ implementation. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -37,7 +37,7 @@ __impl__ = "python" -logger = logging.getLogger("psycopg") +logger = logging.getLogger("gaussdb") OK = ConnStatus.OK @@ -45,7 +45,7 @@ def version() -> int: """Return the version number of the libpq currently loaded. - The number is in the same format of `~psycopg.ConnectionInfo.server_version`. + The number is in the same format of `~gaussdb.ConnectionInfo.server_version`. Certain features might not be available if the libpq library used is too old. """ @@ -151,7 +151,7 @@ def pgconn_ptr(self) -> int | None: `!None` if the connection is closed. The value can be used to pass the structure to libpq functions which - psycopg doesn't (currently) wrap, either in C or in Python using FFI + gaussdb doesn't (currently) wrap, either in C or in Python using FFI libraries such as `ctypes`. """ if self._pgconn_ptr is None: @@ -878,7 +878,7 @@ def pgresult_ptr(self) -> int | None: `!None` if the result was cleared. The value can be used to pass the structure to libpq functions which - psycopg doesn't (currently) wrap, either in C or in Python using FFI + gaussdb doesn't (currently) wrap, either in C or in Python using FFI libraries such as `ctypes`.
""" if self._pgresult_ptr is None: diff --git a/psycopg/psycopg/py.typed b/gaussdb/gaussdb/py.typed similarity index 100% rename from psycopg/psycopg/py.typed rename to gaussdb/gaussdb/py.typed diff --git a/psycopg/psycopg/raw_cursor.py b/gaussdb/gaussdb/raw_cursor.py similarity index 91% rename from psycopg/psycopg/raw_cursor.py rename to gaussdb/gaussdb/raw_cursor.py index 5f42d7013..2a1fefb40 100644 --- a/psycopg/psycopg/raw_cursor.py +++ b/gaussdb/gaussdb/raw_cursor.py @@ -1,8 +1,8 @@ """ -psycopg raw queries cursors +gaussdb raw queries cursors """ -# Copyright (C) 2023 The Psycopg Team +# Copyright (C) 2023 The GaussDB Team from __future__ import annotations @@ -58,18 +58,18 @@ class RawCursorMixin(BaseCursor[ConnectionType, Row]): class RawCursor(RawCursorMixin["Connection[Any]", Row], Cursor[Row]): - __module__ = "psycopg" + __module__ = "gaussdb" class AsyncRawCursor(RawCursorMixin["AsyncConnection[Any]", Row], AsyncCursor[Row]): - __module__ = "psycopg" + __module__ = "gaussdb" class RawServerCursor(RawCursorMixin["Connection[Any]", Row], ServerCursor[Row]): - __module__ = "psycopg" + __module__ = "gaussdb" class AsyncRawServerCursor( RawCursorMixin["AsyncConnection[Any]", Row], AsyncServerCursor[Row] ): - __module__ = "psycopg" + __module__ = "gaussdb" diff --git a/psycopg/psycopg/rows.py b/gaussdb/gaussdb/rows.py similarity index 95% rename from psycopg/psycopg/rows.py rename to gaussdb/gaussdb/rows.py index d84928e56..74bfd05e2 100644 --- a/psycopg/psycopg/rows.py +++ b/gaussdb/gaussdb/rows.py @@ -1,8 +1,8 @@ """ -psycopg row factories +gaussdb row factories """ -# Copyright (C) 2021 The Psycopg Team +# Copyright (C) 2021 The GaussDB Team from __future__ import annotations @@ -51,11 +51,11 @@ def __call__(self, __values: Sequence[Any]) -> Row: ... class RowFactory(Protocol[Row]): """ - Callable protocol taking a `~psycopg.Cursor` and returning a `RowMaker`. + Callable protocol taking a `~gaussdb.Cursor` and returning a `RowMaker`. A `!RowFactory` is typically called when a `!Cursor` receives a result. This way it can inspect the cursor state (for instance the - `~psycopg.Cursor.description` attribute) and help a `!RowMaker` to create + `~gaussdb.Cursor.description` attribute) and help a `!RowMaker` to create a complete object. For instance the `dict_row()` `!RowFactory` uses the names of the column to @@ -100,8 +100,8 @@ def __call__(self, __cursor: BaseCursor[Any, Any]) -> RowMaker[Row]: ... def tuple_row(cursor: BaseCursor[Any, Any]) -> RowMaker[TupleRow]: r"""Row factory to represent rows as simple tuples. - This is the default factory, used when `~psycopg.Connection.connect()` or - `~psycopg.Connection.cursor()` are called without a `!row_factory` + This is the default factory, used when `~gaussdb.Connection.connect()` or + `~gaussdb.Connection.cursor()` are called without a `!row_factory` parameter. """ diff --git a/psycopg/psycopg/server_cursor.py b/gaussdb/gaussdb/server_cursor.py similarity index 99% rename from psycopg/psycopg/server_cursor.py rename to gaussdb/gaussdb/server_cursor.py index cf141a13c..ee0fb1dd7 100644 --- a/psycopg/psycopg/server_cursor.py +++ b/gaussdb/gaussdb/server_cursor.py @@ -1,8 +1,8 @@ """ -psycopg server-side cursor objects. +gaussdb server-side cursor objects. 
""" -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -212,7 +212,7 @@ def _make_declare_statement(self, query: Query) -> sql.Composed: class ServerCursor(ServerCursorMixin["Connection[Any]", Row], Cursor[Row]): - __module__ = "psycopg" + __module__ = "gaussdb" __slots__ = () @overload @@ -351,7 +351,7 @@ def scroll(self, value: int, mode: str = "relative") -> None: class AsyncServerCursor( ServerCursorMixin["AsyncConnection[Any]", Row], AsyncCursor[Row] ): - __module__ = "psycopg" + __module__ = "gaussdb" __slots__ = () @overload diff --git a/psycopg/psycopg/sql.py b/gaussdb/gaussdb/sql.py similarity index 96% rename from psycopg/psycopg/sql.py rename to gaussdb/gaussdb/sql.py index b98a816cc..decb0dbd5 100644 --- a/psycopg/psycopg/sql.py +++ b/gaussdb/gaussdb/sql.py @@ -2,7 +2,7 @@ SQL composition utility module """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -47,8 +47,8 @@ class Composable(ABC): requested. `!SQL` and `!Composed` objects can be passed directly to - `~psycopg.Cursor.execute()`, `~psycopg.Cursor.executemany()`, - `~psycopg.Cursor.copy()` in place of the query string. + `~gaussdb.Cursor.execute()`, `~gaussdb.Cursor.executemany()`, + `~gaussdb.Cursor.copy()` in place of the query string. """ def __init__(self, obj: Any): @@ -65,8 +65,8 @@ def as_bytes(self, context: AdaptContext | None = None) -> bytes: :param context: the context to evaluate the object into. :type context: `connection` or `cursor` - The method is automatically invoked by `~psycopg.Cursor.execute()`, - `~psycopg.Cursor.executemany()`, `~psycopg.Cursor.copy()` if a + The method is automatically invoked by `~gaussdb.Cursor.execute()`, + `~gaussdb.Cursor.executemany()`, `~gaussdb.Cursor.copy()` if a `!Composable` is passed instead of the query string. """ @@ -113,8 +113,8 @@ class Composed(Composable): The object is usually created using `!Composable` operators and methods (such as the `SQL.format()` method). `!Composed` objects can be passed - directly to `~psycopg.Cursor.execute()`, `~psycopg.Cursor.executemany()`, - `~psycopg.Cursor.copy()` in place of the query string. + directly to `~gaussdb.Cursor.execute()`, `~gaussdb.Cursor.executemany()`, + `~gaussdb.Cursor.copy()` in place of the query string. It is also possible to create a `!Composed` directly specifying a sequence of objects as arguments: if they are not `!Composable` they will be wrapped @@ -190,8 +190,8 @@ class SQL(Composable): statements; use other objects such as `Identifier` or `Literal` to represent variable parts. - `!SQL` objects can be passed directly to `~psycopg.Cursor.execute()`, - `~psycopg.Cursor.executemany()`, `~psycopg.Cursor.copy()` in place of the + `!SQL` objects can be passed directly to `~gaussdb.Cursor.execute()`, + `~gaussdb.Cursor.executemany()`, `~gaussdb.Cursor.copy()` in place of the query string. 
Example:: diff --git a/psycopg/psycopg/transaction.py b/gaussdb/gaussdb/transaction.py similarity index 98% rename from psycopg/psycopg/transaction.py rename to gaussdb/gaussdb/transaction.py index 22d40d6ba..020b31685 100644 --- a/psycopg/psycopg/transaction.py +++ b/gaussdb/gaussdb/transaction.py @@ -2,7 +2,7 @@ Transaction context managers returned by Connection.transaction() """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -37,7 +37,7 @@ class Rollback(Exception): enclosing transaction contexts up to and including the one specified. """ - __module__ = "psycopg" + __module__ = "gaussdb" def __init__(self, transaction: Transaction | AsyncTransaction | None = None): self.transaction = transaction @@ -231,7 +231,7 @@ class Transaction(BaseTransaction["Connection[Any]"]): Returned by `Connection.transaction()` to handle a transaction block. """ - __module__ = "psycopg" + __module__ = "gaussdb" @property def connection(self) -> Connection[Any]: @@ -261,7 +261,7 @@ class AsyncTransaction(BaseTransaction["AsyncConnection[Any]"]): Returned by `AsyncConnection.transaction()` to handle a transaction block. """ - __module__ = "psycopg" + __module__ = "gaussdb" @property def connection(self) -> AsyncConnection[Any]: diff --git a/psycopg/psycopg/types/__init__.py b/gaussdb/gaussdb/types/__init__.py similarity index 66% rename from psycopg/psycopg/types/__init__.py rename to gaussdb/gaussdb/types/__init__.py index bdddf054d..9a4025223 100644 --- a/psycopg/psycopg/types/__init__.py +++ b/gaussdb/gaussdb/types/__init__.py @@ -1,8 +1,8 @@ """ -psycopg types package +gaussdb types package """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from .. import _typeinfo diff --git a/psycopg/psycopg/types/array.py b/gaussdb/gaussdb/types/array.py similarity index 98% rename from psycopg/psycopg/types/array.py rename to gaussdb/gaussdb/types/array.py index 5e322a6f3..ee36cbe85 100644 --- a/psycopg/psycopg/types/array.py +++ b/gaussdb/gaussdb/types/array.py @@ -2,7 +2,7 @@ Adapters for arrays """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -18,7 +18,7 @@ from .._oids import INVALID_OID, TEXT_ARRAY_OID, TEXT_OID from ..adapt import PyFormat, RecursiveDumper, RecursiveLoader from .._struct import pack_len, unpack_len -from .._cmodule import _psycopg +from .._cmodule import _gaussdb from .._typeinfo import TypeInfo _struct_head = struct.Struct("!III") # ndims, hasnull, elem oid @@ -323,7 +323,7 @@ def register_array(info: TypeInfo, context: AdaptContext | None = None) -> None: adapters.register_loader(info.array_oid, loader) # No need to make a new loader because the binary datum has all the info. - loader = getattr(_psycopg, "ArrayBinaryLoader", ArrayBinaryLoader) + loader = getattr(_gaussdb, "ArrayBinaryLoader", ArrayBinaryLoader) adapters.register_loader(info.array_oid, loader) dumper = _make_dumper(info.name, info.oid, info.array_oid, info.delimiter) @@ -342,7 +342,7 @@ def _make_loader(name: str, oid: int, delimiter: str) -> type[Loader]: # Note: caching this function is really needed because, if the C extension # is available, the resulting type cannot be GC'd, so calling # register_array() in a loop results in a leak. See #647.
- base = getattr(_psycopg, "ArrayLoader", ArrayLoader) + base = getattr(_gaussdb, "ArrayLoader", ArrayLoader) attribs = {"base_oid": oid, "delimiter": delimiter.encode()} return type(f"{name.title()}{base.__name__}", (base,), attribs) diff --git a/psycopg/psycopg/types/bool.py b/gaussdb/gaussdb/types/bool.py similarity index 96% rename from psycopg/psycopg/types/bool.py rename to gaussdb/gaussdb/types/bool.py index 8f367b80e..e2071ed83 100644 --- a/psycopg/psycopg/types/bool.py +++ b/gaussdb/gaussdb/types/bool.py @@ -2,7 +2,7 @@ Adapters for booleans. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/types/composite.py b/gaussdb/gaussdb/types/composite.py similarity index 99% rename from psycopg/psycopg/types/composite.py rename to gaussdb/gaussdb/types/composite.py index b29279562..05659e30c 100644 --- a/psycopg/psycopg/types/composite.py +++ b/gaussdb/gaussdb/types/composite.py @@ -2,7 +2,7 @@ Support for composite types adaptation. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/types/datetime.py b/gaussdb/gaussdb/types/datetime.py similarity index 99% rename from psycopg/psycopg/types/datetime.py rename to gaussdb/gaussdb/types/datetime.py index 016e216bc..5cf1cc1b0 100644 --- a/psycopg/psycopg/types/datetime.py +++ b/gaussdb/gaussdb/types/datetime.py @@ -2,7 +2,7 @@ Adapters for date/time types. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/types/enum.py b/gaussdb/gaussdb/types/enum.py similarity index 100% rename from psycopg/psycopg/types/enum.py rename to gaussdb/gaussdb/types/enum.py diff --git a/psycopg/psycopg/types/hstore.py b/gaussdb/gaussdb/types/hstore.py similarity index 99% rename from psycopg/psycopg/types/hstore.py rename to gaussdb/gaussdb/types/hstore.py index 4735b480a..749154c8f 100644 --- a/psycopg/psycopg/types/hstore.py +++ b/gaussdb/gaussdb/types/hstore.py @@ -2,7 +2,7 @@ dict to hstore adaptation """ -# Copyright (C) 2021 The Psycopg Team +# Copyright (C) 2021 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/types/json.py b/gaussdb/gaussdb/types/json.py similarity index 97% rename from psycopg/psycopg/types/json.py rename to gaussdb/gaussdb/types/json.py index 2bf389c68..b09f7856c 100644 --- a/psycopg/psycopg/types/json.py +++ b/gaussdb/gaussdb/types/json.py @@ -2,7 +2,7 @@ Adapters for JSON types. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -31,7 +31,7 @@ def set_json_dumps( :type dumps: `!Callable[[Any], str]` :param context: Where to use the `!dumps` function. If not specified, use it globally. - :type context: `~psycopg.Connection` or `~psycopg.Cursor` + :type context: `~gaussdb.Connection` or `~gaussdb.Cursor` By default dumping JSON uses the builtin `json.dumps`. You can override it to use a different JSON library or to use customised arguments. @@ -70,7 +70,7 @@ def set_json_loads( :type loads: `!Callable[[bytes], Any]` :param context: Where to use the `!loads` function. If not specified, use it globally. - :type context: `~psycopg.Connection` or `~psycopg.Cursor` + :type context: `~gaussdb.Connection` or `~gaussdb.Cursor` By default loading JSON uses the builtin `json.loads`. You can override it to use a different JSON library or to use customised arguments. 
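The `set_json_dumps()` and `set_json_loads()` hunks above only touch module paths, but the customisation hook they document is worth a concrete illustration. A minimal sketch, using only the standard library and assuming the renamed module keeps psycopg's signature of `set_json_dumps(dumps, context=None)`::

    import json
    from functools import partial

    from gaussdb.types.json import Jsonb, set_json_dumps

    # Replace the global JSON serialiser: keep non-ASCII characters verbatim
    # instead of escaping them.
    set_json_dumps(partial(json.dumps, ensure_ascii=False))

    # Values wrapped in Json/Jsonb are then dumped with the customised
    # function, e.g.:
    # cur.execute("INSERT INTO t (data) VALUES (%s)", [Jsonb({"name": "café"})])

Passing a `context` (a connection or cursor, per the docstring above) scopes the override to that object instead of applying it globally.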
diff --git a/psycopg/psycopg/types/multirange.py b/gaussdb/gaussdb/types/multirange.py similarity index 99% rename from psycopg/psycopg/types/multirange.py rename to gaussdb/gaussdb/types/multirange.py index edb67968f..042a76361 100644 --- a/psycopg/psycopg/types/multirange.py +++ b/gaussdb/gaussdb/types/multirange.py @@ -2,7 +2,7 @@ Support for multirange types adaptation. """ -# Copyright (C) 2021 The Psycopg Team +# Copyright (C) 2021 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/types/net.py b/gaussdb/gaussdb/types/net.py similarity index 99% rename from psycopg/psycopg/types/net.py rename to gaussdb/gaussdb/types/net.py index 31b9a961a..7ca9b916c 100644 --- a/psycopg/psycopg/types/net.py +++ b/gaussdb/gaussdb/types/net.py @@ -2,7 +2,7 @@ Adapters for network types. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/types/none.py b/gaussdb/gaussdb/types/none.py similarity index 94% rename from psycopg/psycopg/types/none.py rename to gaussdb/gaussdb/types/none.py index 723e06d1c..c217d039e 100644 --- a/psycopg/psycopg/types/none.py +++ b/gaussdb/gaussdb/types/none.py @@ -2,7 +2,7 @@ Adapters for None. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/types/numeric.py b/gaussdb/gaussdb/types/numeric.py similarity index 99% rename from psycopg/psycopg/types/numeric.py rename to gaussdb/gaussdb/types/numeric.py index 8c0e64ed1..9a3a614a0 100644 --- a/psycopg/psycopg/types/numeric.py +++ b/gaussdb/gaussdb/types/numeric.py @@ -2,7 +2,7 @@ Adapters for numeric types. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/types/numpy.py b/gaussdb/gaussdb/types/numpy.py similarity index 98% rename from psycopg/psycopg/types/numpy.py rename to gaussdb/gaussdb/types/numpy.py index e83b9731c..7941af7ec 100644 --- a/psycopg/psycopg/types/numpy.py +++ b/gaussdb/gaussdb/types/numpy.py @@ -2,7 +2,7 @@ Adapters for numpy types. """ -# Copyright (C) 2022 The Psycopg Team +# Copyright (C) 2022 The GaussDB Team from typing import Any diff --git a/psycopg/psycopg/types/range.py b/gaussdb/gaussdb/types/range.py similarity index 99% rename from psycopg/psycopg/types/range.py rename to gaussdb/gaussdb/types/range.py index 6a4da54bf..f7de846cb 100644 --- a/psycopg/psycopg/types/range.py +++ b/gaussdb/gaussdb/types/range.py @@ -2,7 +2,7 @@ Support for range types adaptation. 
""" -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/types/shapely.py b/gaussdb/gaussdb/types/shapely.py similarity index 97% rename from psycopg/psycopg/types/shapely.py rename to gaussdb/gaussdb/types/shapely.py index a4fa6859f..449054710 100644 --- a/psycopg/psycopg/types/shapely.py +++ b/gaussdb/gaussdb/types/shapely.py @@ -17,7 +17,7 @@ except ImportError: raise ImportError( - "The module psycopg.types.shapely requires the package 'Shapely'" + "The module gaussdb.types.shapely requires the package 'Shapely'" " to be installed" ) diff --git a/psycopg/psycopg/types/string.py b/gaussdb/gaussdb/types/string.py similarity index 99% rename from psycopg/psycopg/types/string.py rename to gaussdb/gaussdb/types/string.py index 262ac658c..e7fafb72e 100644 --- a/psycopg/psycopg/types/string.py +++ b/gaussdb/gaussdb/types/string.py @@ -2,7 +2,7 @@ Adapters for textual types. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations diff --git a/psycopg/psycopg/types/uuid.py b/gaussdb/gaussdb/types/uuid.py similarity index 97% rename from psycopg/psycopg/types/uuid.py rename to gaussdb/gaussdb/types/uuid.py index 596337746..30e47cd7a 100644 --- a/psycopg/psycopg/types/uuid.py +++ b/gaussdb/gaussdb/types/uuid.py @@ -2,7 +2,7 @@ Adapters for the UUID type. """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations diff --git a/gaussdb/gaussdb/version.py b/gaussdb/gaussdb/version.py new file mode 100644 index 000000000..0fa864dd4 --- /dev/null +++ b/gaussdb/gaussdb/version.py @@ -0,0 +1,12 @@ +""" +gaussdb distribution version file. +""" + +# Copyright (C) 2020 The GaussDB Team + +from importlib import metadata + +try: + __version__ = metadata.version("gaussdb") +except metadata.PackageNotFoundError: + __version__ = "0.0.0.0" diff --git a/psycopg/psycopg/waiting.py b/gaussdb/gaussdb/waiting.py similarity index 97% rename from psycopg/psycopg/waiting.py rename to gaussdb/gaussdb/waiting.py index 05994b5d5..f460610f8 100644 --- a/psycopg/psycopg/waiting.py +++ b/gaussdb/gaussdb/waiting.py @@ -6,7 +6,7 @@ """ -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team from __future__ import annotations @@ -22,7 +22,7 @@ from .abc import RV, PQGen, PQGenConn, WaitFunc from ._enums import Ready as Ready from ._enums import Wait as Wait # re-exported -from ._cmodule import _psycopg +from ._cmodule import _gaussdb WAIT_R = Wait.R WAIT_W = Wait.W @@ -284,7 +284,7 @@ def wait_epoll(gen: PQGen[RV], fileno: int, interval: float | None = None) -> RV EpollSelector. For this reason, wait_poll() is currently preferable. To reproduce the bug: - export PSYCOPG_WAIT_FUNC=wait_epoll + export GAUSSDB_WAIT_FUNC=wait_epoll pytest tests/test_concurrency.py::test_concurrent_close """ try: @@ -385,8 +385,8 @@ def _is_select_patched() -> bool: return False -if _psycopg: - wait_c = _psycopg.wait_c +if _gaussdb: + wait_c = _gaussdb.wait_c # Choose the best wait strategy for the platform. 
@@ -397,11 +397,11 @@ def _is_select_patched() -> bool: wait: WaitFunc # Allow the user to choose a specific function for testing -if "PSYCOPG_WAIT_FUNC" in os.environ: - fname = os.environ["PSYCOPG_WAIT_FUNC"] +if "GAUSSDB_WAIT_FUNC" in os.environ: + fname = os.environ["GAUSSDB_WAIT_FUNC"] if not fname.startswith("wait_") or fname not in globals(): raise ImportError( - "PSYCOPG_WAIT_FUNC should be the name of an available wait function;" + "GAUSSDB_WAIT_FUNC should be the name of an available wait function;" f" got {fname!r}" ) wait = globals()[fname] @@ -409,7 +409,7 @@ def _is_select_patched() -> bool: # On Windows, for the moment, avoid using wait_c, because it was reported to # use excessive CPU (see #645). # TODO: investigate why. -elif _psycopg and sys.platform != "win32" and not _is_select_patched(): +elif _gaussdb and sys.platform != "win32" and not _is_select_patched(): wait = wait_c elif selectors.DefaultSelector is getattr(selectors, "SelectSelector", None): diff --git a/psycopg/pyproject.toml b/gaussdb/pyproject.toml similarity index 83% rename from psycopg/pyproject.toml rename to gaussdb/pyproject.toml index d0b8e4585..00b58ff9b 100644 --- a/psycopg/pyproject.toml +++ b/gaussdb/pyproject.toml @@ -3,14 +3,14 @@ requires = ["setuptools>=49.2.0", "wheel>=0.37"] build-backend = "setuptools.build_meta" [project] -name = "psycopg" +name = "gaussdb" description = "PostgreSQL database adapter for Python" # STOP AND READ! if you change: -version = "3.3.0.dev1" +version = "1.0.0.dev1" # also change: # - `docs/news.rst` to declare this as the current version or an unreleased one; -# - `psycopg_c/pyproject.toml` to the same version; +# - `gaussdb_c/pyproject.toml` to the same version; # - the `c ` and `binary` "optional-dependencies" below to the same version. # # NOTE: you can use `tools/bump_version.py` to maintain versions. 
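These version fields pair with the new `gaussdb/gaussdb/version.py` added earlier in this diff, which resolves `__version__` from the installed distribution's metadata instead of hardcoding it. A quick consistency check, assuming the package has actually been installed (an uninstalled source tree reports the `0.0.0.0` fallback instead)::

    from importlib import metadata

    from gaussdb.version import __version__

    # Both values come from the same installed-distribution metadata, for
    # which this pyproject.toml is the single source of truth.
    assert __version__ == metadata.version("gaussdb")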
@@ -49,11 +49,11 @@ email = "daniele.varrazzo@gmail.com" text = "GNU Lesser General Public License v3 (LGPLv3)" [project.urls] -Homepage = "https://psycopg.org/" -Documentation = "https://psycopg.org/psycopg3/docs/" -Changes = "https://psycopg.org/psycopg3/docs/news.html" -Code = "https://github.com/psycopg/psycopg" -"Issue Tracker" = "https://github.com/psycopg/psycopg/issues" +Homepage = "https://gaussdb.org/" +Documentation = "https://gaussdb.org/gaussdb/docs/" +Changes = "https://gaussdb.org/gaussdb/docs/news.html" +Code = "https://github.com/gaussdb/gaussdb" +"Issue Tracker" = "https://github.com/gaussdb/gaussdb/issues" [project.readme] file = "README.rst" @@ -61,13 +61,13 @@ content-type = "text/x-rst" [project.optional-dependencies] c = [ - "psycopg-c == 3.3.0.dev1; implementation_name != \"pypy\"", + "gaussdb-c == 1.0.0.dev1; implementation_name != \"pypy\"", ] binary = [ - "psycopg-binary == 3.3.0.dev1; implementation_name != \"pypy\"", + "gaussdb-binary == 1.0.0.dev1; implementation_name != \"pypy\"", ] pool = [ - "psycopg-pool", + "gaussdb-pool", ] test = [ "anyio >= 4.0", @@ -84,7 +84,7 @@ dev = [ "dnspython >= 2.1", "flake8 >= 4.0", "isort[colors] >= 6.0", - "isort-psycopg", + "isort-gaussdb", "mypy >= 1.14", "pre-commit >= 4.0.1", "types-setuptools >= 57.4", @@ -104,7 +104,7 @@ license-files = ["LICENSE.txt"] include-package-data = true [tool.setuptools.package-data] -psycopg = [ +gaussdb = [ "py.typed", ] diff --git a/psycopg_c/LICENSE.txt b/gaussdb_pool/LICENSE.txt similarity index 100% rename from psycopg_c/LICENSE.txt rename to gaussdb_pool/LICENSE.txt diff --git a/gaussdb_pool/README.rst b/gaussdb_pool/README.rst new file mode 100644 index 000000000..0eaee00d5 --- /dev/null +++ b/gaussdb_pool/README.rst @@ -0,0 +1,25 @@ +gaussdb: PostgreSQL database adapter for Python - Connection Pool +=================================================================== + +This distribution package is an optional component of `gaussdb`__: it +contains the optional connection pool package `gaussdb_pool`__. + +.. __: https://pypi.org/project/gaussdb/ +.. __: https://www.gaussdb.org/gaussdb/docs/advanced/pool.html + +This package is kept separate from the main ``gaussdb`` package because it is +likely that it will follow a different release cycle. + +You can also install this package using:: + + pip install "gaussdb[pool]" + +Please read `the project readme`__ and `the installation documentation`__ for +more details. + +.. __: https://github.com/gaussdb/gaussdb#readme +.. 
__: https://www.gaussdb.org/gaussdb/docs/basic/install.html + #installing-the-connection-pool + + +Copyright (C) 2020 The GaussDB Team diff --git a/psycopg_pool/psycopg_pool/__init__.py b/gaussdb_pool/gaussdb_pool/__init__.py similarity index 87% rename from psycopg_pool/psycopg_pool/__init__.py rename to gaussdb_pool/gaussdb_pool/__init__.py index f29381d57..4a6b2fd43 100644 --- a/psycopg_pool/psycopg_pool/__init__.py +++ b/gaussdb_pool/gaussdb_pool/__init__.py @@ -1,8 +1,8 @@ """ -psycopg connection pool package +gaussdb connection pool package """ -# Copyright (C) 2021 The Psycopg Team +# Copyright (C) 2021 The GaussDB Team from .pool import ConnectionPool from .errors import PoolClosed, PoolTimeout, TooManyRequests diff --git a/psycopg_pool/psycopg_pool/_acompat.py b/gaussdb_pool/gaussdb_pool/_acompat.py similarity index 98% rename from psycopg_pool/psycopg_pool/_acompat.py rename to gaussdb_pool/gaussdb_pool/_acompat.py index aede93137..356f680f1 100644 --- a/psycopg_pool/psycopg_pool/_acompat.py +++ b/gaussdb_pool/gaussdb_pool/_acompat.py @@ -6,7 +6,7 @@ when generating the sync version. """ -# Copyright (C) 2023 The Psycopg Team +# Copyright (C) 2023 The GaussDB Team from __future__ import annotations @@ -20,7 +20,7 @@ from ._compat import TypeAlias, TypeVar -logger = logging.getLogger("psycopg.pool") +logger = logging.getLogger("gaussdb.pool") T = TypeVar("T") # Re-exports diff --git a/psycopg_pool/psycopg_pool/_compat.py b/gaussdb_pool/gaussdb_pool/_compat.py similarity index 88% rename from psycopg_pool/psycopg_pool/_compat.py rename to gaussdb_pool/gaussdb_pool/_compat.py index be6a967e0..5d6982742 100644 --- a/psycopg_pool/psycopg_pool/_compat.py +++ b/gaussdb_pool/gaussdb_pool/_compat.py @@ -2,7 +2,7 @@ compatibility functions for different Python versions """ -# Copyright (C) 2021 The Psycopg Team +# Copyright (C) 2021 The GaussDB Team from __future__ import annotations @@ -23,7 +23,7 @@ else: from typing_extensions import TypeVar -import psycopg.errors as e +import gaussdb.errors as e __all__ = [ "Self", @@ -31,7 +31,7 @@ "TypeVar", ] -# Workaround for psycopg < 3.0.8. +# Workaround for gaussdb < 3.0.8. # Timeout on NullPool connection might not work correctly.
try: ConnectionTimeout: type[e.OperationalError] = e.ConnectionTimeout diff --git a/psycopg_pool/psycopg_pool/_task.py b/gaussdb_pool/gaussdb_pool/_task.py similarity index 87% rename from psycopg_pool/psycopg_pool/_task.py rename to gaussdb_pool/gaussdb_pool/_task.py index 7629d1c46..c0275b956 100644 --- a/psycopg_pool/psycopg_pool/_task.py +++ b/gaussdb_pool/gaussdb_pool/_task.py @@ -2,7 +2,7 @@ Task for Scheduler and AsyncScheduler """ -# Copyright (C) 2023 The Psycopg Team +# Copyright (C) 2023 The GaussDB Team from __future__ import annotations diff --git a/psycopg_pool/psycopg_pool/abc.py b/gaussdb_pool/gaussdb_pool/abc.py similarity index 83% rename from psycopg_pool/psycopg_pool/abc.py rename to gaussdb_pool/gaussdb_pool/abc.py index a9783626f..3f9b22dd9 100644 --- a/psycopg_pool/psycopg_pool/abc.py +++ b/gaussdb_pool/gaussdb_pool/abc.py @@ -1,8 +1,8 @@ """ -Types used in the psycopg_pool package +Types used in the gaussdb_pool package """ -# Copyright (C) 2023 The Psycopg Team +# Copyright (C) 2023 The GaussDB Team from __future__ import annotations @@ -14,8 +14,8 @@ if TYPE_CHECKING: from typing import Any # noqa: F401 - from psycopg import AsyncConnection, Connection # noqa: F401 - from psycopg.rows import TupleRow # noqa: F401 + from gaussdb import AsyncConnection, Connection # noqa: F401 + from gaussdb.rows import TupleRow # noqa: F401 from .pool import ConnectionPool # noqa: F401 from .pool_async import AsyncConnectionPool # noqa: F401 diff --git a/psycopg_pool/psycopg_pool/base.py b/gaussdb_pool/gaussdb_pool/base.py similarity index 97% rename from psycopg_pool/psycopg_pool/base.py rename to gaussdb_pool/gaussdb_pool/base.py index 026b95812..c7c3dae4f 100644 --- a/psycopg_pool/psycopg_pool/base.py +++ b/gaussdb_pool/gaussdb_pool/base.py @@ -1,8 +1,8 @@ """ -psycopg connection pool base class and functionalities. +gaussdb connection pool base class and functionalities. """ -# Copyright (C) 2021 The Psycopg Team +# Copyright (C) 2021 The GaussDB Team from __future__ import annotations @@ -11,12 +11,12 @@ from typing import TYPE_CHECKING, Any from collections import Counter, deque -from psycopg import errors as e +from gaussdb import errors as e from .errors import PoolClosed if TYPE_CHECKING: - from psycopg._connection_base import BaseConnection + from gaussdb._connection_base import BaseConnection class BasePool: diff --git a/psycopg_pool/psycopg_pool/base_null_pool.py b/gaussdb_pool/gaussdb_pool/base_null_pool.py similarity index 89% rename from psycopg_pool/psycopg_pool/base_null_pool.py rename to gaussdb_pool/gaussdb_pool/base_null_pool.py index c75a2290d..0ddfec358 100644 --- a/psycopg_pool/psycopg_pool/base_null_pool.py +++ b/gaussdb_pool/gaussdb_pool/base_null_pool.py @@ -1,8 +1,8 @@ """ -Psycopg mixin class for null connection pools +GaussDB mixin class for null connection pools """ -# Copyright (C) 2022 The Psycopg Team +# Copyright (C) 2022 The GaussDB Team from __future__ import annotations diff --git a/psycopg_pool/psycopg_pool/errors.py b/gaussdb_pool/gaussdb_pool/errors.py similarity index 69% rename from psycopg_pool/psycopg_pool/errors.py rename to gaussdb_pool/gaussdb_pool/errors.py index 9e672adc0..56bd7db05 100644 --- a/psycopg_pool/psycopg_pool/errors.py +++ b/gaussdb_pool/gaussdb_pool/errors.py @@ -2,24 +2,24 @@ Connection pool errors. 
""" -# Copyright (C) 2021 The Psycopg Team +# Copyright (C) 2021 The GaussDB Team -from psycopg import errors as e +from gaussdb import errors as e class PoolClosed(e.OperationalError): """Attempt to get a connection from a closed pool.""" - __module__ = "psycopg_pool" + __module__ = "gaussdb_pool" class PoolTimeout(e.OperationalError): """The pool couldn't provide a connection in acceptable time.""" - __module__ = "psycopg_pool" + __module__ = "gaussdb_pool" class TooManyRequests(e.OperationalError): """Too many requests in the queue waiting for a connection from the pool.""" - __module__ = "psycopg_pool" + __module__ = "gaussdb_pool" diff --git a/psycopg_pool/psycopg_pool/null_pool.py b/gaussdb_pool/gaussdb_pool/null_pool.py similarity index 96% rename from psycopg_pool/psycopg_pool/null_pool.py rename to gaussdb_pool/gaussdb_pool/null_pool.py index 37b7bc707..a48c83df4 100644 --- a/psycopg_pool/psycopg_pool/null_pool.py +++ b/gaussdb_pool/gaussdb_pool/null_pool.py @@ -2,18 +2,18 @@ # from the original file 'null_pool_async.py' # DO NOT CHANGE! Change the original file instead. """ -Psycopg null connection pool module (sync version). +GaussDB null connection pool module (sync version). """ -# Copyright (C) 2022 The Psycopg Team +# Copyright (C) 2022 The GaussDB Team from __future__ import annotations import logging from typing import Any, cast -from psycopg import Connection -from psycopg.pq import TransactionStatus +from gaussdb import Connection +from gaussdb.pq import TransactionStatus from .abc import CT, ConnectFailedCB, ConnectionCB from .pool import AddConnection, ConnectionPool @@ -22,7 +22,7 @@ from ._acompat import Event from .base_null_pool import _BaseNullConnectionPool -logger = logging.getLogger("psycopg.pool") +logger = logging.getLogger("gaussdb.pool") class NullConnectionPool(_BaseNullConnectionPool, ConnectionPool[CT]): diff --git a/psycopg_pool/psycopg_pool/null_pool_async.py b/gaussdb_pool/gaussdb_pool/null_pool_async.py similarity index 96% rename from psycopg_pool/psycopg_pool/null_pool_async.py rename to gaussdb_pool/gaussdb_pool/null_pool_async.py index b73504824..e6331843e 100644 --- a/psycopg_pool/psycopg_pool/null_pool_async.py +++ b/gaussdb_pool/gaussdb_pool/null_pool_async.py @@ -1,16 +1,16 @@ """ -Psycopg null connection pool module (async version). +GaussDB null connection pool module (async version). """ -# Copyright (C) 2022 The Psycopg Team +# Copyright (C) 2022 The GaussDB Team from __future__ import annotations import logging from typing import Any, cast -from psycopg import AsyncConnection -from psycopg.pq import TransactionStatus +from gaussdb import AsyncConnection +from gaussdb.pq import TransactionStatus from .abc import ACT, AsyncConnectFailedCB, AsyncConnectionCB from .errors import PoolTimeout, TooManyRequests @@ -19,7 +19,7 @@ from .pool_async import AddConnection, AsyncConnectionPool from .base_null_pool import _BaseNullConnectionPool -logger = logging.getLogger("psycopg.pool") +logger = logging.getLogger("gaussdb.pool") class AsyncNullConnectionPool(_BaseNullConnectionPool, AsyncConnectionPool[ACT]): diff --git a/psycopg_pool/psycopg_pool/pool.py b/gaussdb_pool/gaussdb_pool/pool.py similarity index 99% rename from psycopg_pool/psycopg_pool/pool.py rename to gaussdb_pool/gaussdb_pool/pool.py index f6a7139f3..a12f1725a 100644 --- a/psycopg_pool/psycopg_pool/pool.py +++ b/gaussdb_pool/gaussdb_pool/pool.py @@ -2,10 +2,10 @@ # from the original file 'pool_async.py' # DO NOT CHANGE! Change the original file instead. 
""" -Psycopg connection pool module (sync version). +GaussDB connection pool module (sync version). """ -# Copyright (C) 2021 The Psycopg Team +# Copyright (C) 2021 The GaussDB Team from __future__ import annotations @@ -20,9 +20,9 @@ from collections import deque from collections.abc import Iterator -from psycopg import Connection -from psycopg import errors as e -from psycopg.pq import TransactionStatus +from gaussdb import Connection +from gaussdb import errors as e +from gaussdb.pq import TransactionStatus from .abc import CT, ConnectFailedCB, ConnectionCB from .base import AttemptWithBackoff, BasePool @@ -32,7 +32,7 @@ from ._acompat import Condition, Event, Lock, Queue, Worker, current_thread_name from ._acompat import gather, sleep, spawn -logger = logging.getLogger("psycopg.pool") +logger = logging.getLogger("gaussdb.pool") class ConnectionPool(Generic[CT], BasePool): diff --git a/psycopg_pool/psycopg_pool/pool_async.py b/gaussdb_pool/gaussdb_pool/pool_async.py similarity index 99% rename from psycopg_pool/psycopg_pool/pool_async.py rename to gaussdb_pool/gaussdb_pool/pool_async.py index a8258a4da..c144f8a0e 100644 --- a/psycopg_pool/psycopg_pool/pool_async.py +++ b/gaussdb_pool/gaussdb_pool/pool_async.py @@ -1,8 +1,8 @@ """ -Psycopg connection pool module (async version). +GaussDB connection pool module (async version). """ -# Copyright (C) 2021 The Psycopg Team +# Copyright (C) 2021 The GaussDB Team from __future__ import annotations @@ -17,9 +17,9 @@ from collections import deque from collections.abc import AsyncIterator -from psycopg import AsyncConnection -from psycopg import errors as e -from psycopg.pq import TransactionStatus +from gaussdb import AsyncConnection +from gaussdb import errors as e +from gaussdb.pq import TransactionStatus from .abc import ACT, AsyncConnectFailedCB, AsyncConnectionCB from .base import AttemptWithBackoff, BasePool @@ -32,7 +32,7 @@ if True: # ASYNC import asyncio -logger = logging.getLogger("psycopg.pool") +logger = logging.getLogger("gaussdb.pool") class AsyncConnectionPool(Generic[ACT], BasePool): @@ -577,7 +577,7 @@ async def check_connection(conn: ACT) -> None: await conn.execute("") else: if True: # ASYNC - # NOTE: with Psycopg 3.2 we could use conn.set_autocommit() in + # NOTE: with gaussdb.2 we could use conn.set_autocommit() in # the sync code too, but we want the pool to be compatible with # previous versions too. await conn.set_autocommit(True) diff --git a/psycopg_c/psycopg_c/py.typed b/gaussdb_pool/gaussdb_pool/py.typed similarity index 100% rename from psycopg_c/psycopg_c/py.typed rename to gaussdb_pool/gaussdb_pool/py.typed diff --git a/psycopg_pool/psycopg_pool/sched.py b/gaussdb_pool/gaussdb_pool/sched.py similarity index 98% rename from psycopg_pool/psycopg_pool/sched.py rename to gaussdb_pool/gaussdb_pool/sched.py index f41179858..6566b5c1a 100644 --- a/psycopg_pool/psycopg_pool/sched.py +++ b/gaussdb_pool/gaussdb_pool/sched.py @@ -13,7 +13,7 @@ `[threading/asyncio].Event` and the two would be confusing. 
""" -# Copyright (C) 2021 The Psycopg Team +# Copyright (C) 2021 The GaussDB Team from __future__ import annotations diff --git a/psycopg_pool/psycopg_pool/sched_async.py b/gaussdb_pool/gaussdb_pool/sched_async.py similarity index 98% rename from psycopg_pool/psycopg_pool/sched_async.py rename to gaussdb_pool/gaussdb_pool/sched_async.py index 86f59d036..732e1b45a 100644 --- a/psycopg_pool/psycopg_pool/sched_async.py +++ b/gaussdb_pool/gaussdb_pool/sched_async.py @@ -10,7 +10,7 @@ `[threading/asyncio].Event` and the two would be confusing. """ -# Copyright (C) 2021 The Psycopg Team +# Copyright (C) 2021 The GaussDB Team from __future__ import annotations diff --git a/gaussdb_pool/gaussdb_pool/version.py b/gaussdb_pool/gaussdb_pool/version.py new file mode 100644 index 000000000..246143515 --- /dev/null +++ b/gaussdb_pool/gaussdb_pool/version.py @@ -0,0 +1,12 @@ +""" +gaussdb pool version file. +""" + +# Copyright (C) 2021 The GaussDB Team + +from importlib import metadata + +try: + __version__ = metadata.version("gaussdb-pool") +except metadata.PackageNotFoundError: + __version__ = "0.0.0.0" diff --git a/psycopg_pool/pyproject.toml b/gaussdb_pool/pyproject.toml similarity index 81% rename from psycopg_pool/pyproject.toml rename to gaussdb_pool/pyproject.toml index 4a0879e83..33ee3b958 100644 --- a/psycopg_pool/pyproject.toml +++ b/gaussdb_pool/pyproject.toml @@ -3,11 +3,11 @@ requires = ["setuptools>=49.2.0", "wheel>=0.37"] build-backend = "setuptools.build_meta" [project] -name = "psycopg-pool" -description = "Connection Pool for Psycopg" +name = "gaussdb-pool" +description = "Connection Pool for GaussDB" # STOP AND READ! if you change: -version = "3.3.0.dev1" +version = "1.0.0.dev1" # also change: # - `docs/news_pool.rst` to declare this version current or unreleased @@ -44,11 +44,11 @@ email = "daniele.varrazzo@gmail.com" text = "GNU Lesser General Public License v3 (LGPLv3)" [project.urls] -Homepage = "https://psycopg.org/" -Documentation = "https://www.psycopg.org/psycopg3/docs/advanced/pool.html" -Changes = "https://psycopg.org/psycopg3/docs/news_pool.html" -Code = "https://github.com/psycopg/psycopg" -"Issue Tracker" = "https://github.com/psycopg/psycopg/issues" +Homepage = "https://gaussdb.org/" +Documentation = "https://www.gaussdb.org/gaussdb/docs/advanced/pool.html" +Changes = "https://gaussdb.org/gaussdb/docs/news_pool.html" +Code = "https://github.com/gaussdb/gaussdb" +"Issue Tracker" = "https://github.com/gaussdb/gaussdb/issues" [project.readme] file = "README.rst" @@ -60,7 +60,7 @@ license-files = ["LICENSE.txt"] include-package-data = true [tool.setuptools.package-data] -psycopg_pool = [ +gaussdb_pool = [ "py.typed", ] diff --git a/psycopg/.flake8 b/psycopg/.flake8 deleted file mode 100644 index 33b08d768..000000000 --- a/psycopg/.flake8 +++ /dev/null @@ -1,6 +0,0 @@ -[flake8] -max-line-length = 88 -ignore = W503, E203, E704 -per-file-ignores = - # Autogenerated section - psycopg/errors.py: E125, E128, E302 diff --git a/psycopg/README.rst b/psycopg/README.rst deleted file mode 100644 index d6dfbe882..000000000 --- a/psycopg/README.rst +++ /dev/null @@ -1,42 +0,0 @@ -Psycopg 3: PostgreSQL database adapter for Python -================================================= - -Psycopg 3 is a modern implementation of a PostgreSQL adapter for Python. - -This distribution contains the pure Python package ``psycopg``. - -.. Note:: - - Despite the lack of number in the package name, this package is the - successor of psycopg2_. 
- - Please use the psycopg2 package if you are maintaining an existing program - using psycopg2 as a dependency. If you are developing something new, - Psycopg 3 is the most current implementation of the adapter. - - .. _psycopg2: https://pypi.org/project/psycopg2/ - - -Installation ------------- - -In short, run the following:: - - pip install --upgrade pip # to upgrade pip - pip install "psycopg[binary,pool]" # to install package and dependencies - -If something goes wrong, and for more information about installation, please -check out the `Installation documentation`__. - -.. __: https://www.psycopg.org/psycopg3/docs/basic/install.html# - - -Hacking -------- - -For development information check out `the project readme`__. - -.. __: https://github.com/psycopg/psycopg#readme - - -Copyright (C) 2020 The Psycopg Team diff --git a/psycopg/psycopg/version.py b/psycopg/psycopg/version.py deleted file mode 100644 index b6c68c2b9..000000000 --- a/psycopg/psycopg/version.py +++ /dev/null @@ -1,12 +0,0 @@ -""" -psycopg distribution version file. -""" - -# Copyright (C) 2020 The Psycopg Team - -from importlib import metadata - -try: - __version__ = metadata.version("psycopg") -except metadata.PackageNotFoundError: - __version__ = "0.0.0.0" diff --git a/psycopg_c/.flake8 b/psycopg_c/.flake8 deleted file mode 100644 index 40a061b1e..000000000 --- a/psycopg_c/.flake8 +++ /dev/null @@ -1,3 +0,0 @@ -[flake8] -max-line-length = 88 -ignore = W503, E203, E704 diff --git a/psycopg_c/MANIFEST.in b/psycopg_c/MANIFEST.in deleted file mode 100644 index e9404c7d7..000000000 --- a/psycopg_c/MANIFEST.in +++ /dev/null @@ -1,3 +0,0 @@ -# Include the build backend in the distributed files. -# It doesn't seem it can be specified in pyproject.toml -include build_backend/*.py diff --git a/psycopg_c/README-binary.rst b/psycopg_c/README-binary.rst deleted file mode 100644 index af435f422..000000000 --- a/psycopg_c/README-binary.rst +++ /dev/null @@ -1,33 +0,0 @@ -Psycopg 3: PostgreSQL database adapter for Python - binary package -================================================================== - -This distribution package is an optional component of `Psycopg 3`__: it -contains the optional optimization package `psycopg_binary`__. - -.. __: https://pypi.org/project/psycopg/ -.. __: https://www.psycopg.org/psycopg3/docs/basic/install.html - #binary-installation - -You shouldn't install this package directly: use instead :: - - pip install "psycopg[binary]" - -to install a version of the optimization package matching the ``psycopg`` -version installed. - -Installing this package requires pip >= 20.3 or newer installed. - -This package is not available for every platform: check out `Binary -installation`__ in the documentation. - -.. __: https://www.psycopg.org/psycopg3/docs/basic/install.html - #binary-installation - -Please read `the project readme`__ and `the installation documentation`__ for -more details. - -.. __: https://github.com/psycopg/psycopg#readme -.. __: https://www.psycopg.org/psycopg3/docs/basic/install.html - - -Copyright (C) 2020 The Psycopg Team diff --git a/psycopg_c/README.rst b/psycopg_c/README.rst deleted file mode 100644 index bdfba0e7f..000000000 --- a/psycopg_c/README.rst +++ /dev/null @@ -1,38 +0,0 @@ -Psycopg 3: PostgreSQL database adapter for Python - optimisation package -======================================================================== - -This distribution package is an optional component of `Psycopg 3`__: it -contains the optional optimization package `psycopg_c`__. - -.. 
__: https://pypi.org/project/psycopg/ -.. __: https://www.psycopg.org/psycopg3/docs/basic/install.html - #local-installation - -You shouldn't install this package directly: use instead :: - - pip install "psycopg[c]" - -to install a version of the optimization package matching the ``psycopg`` -version installed. - -Installing this package requires some prerequisites: check `Local -installation`__ in the documentation. Without a C compiler and some library -headers install *will fail*: this is not a bug. - -If you are unable to meet the prerequisite needed you might want to install -``psycopg[binary]`` instead: look for `Binary installation`__ in the -documentation. - -.. __: https://www.psycopg.org/psycopg3/docs/basic/install.html - #local-installation -.. __: https://www.psycopg.org/psycopg3/docs/basic/install.html - #binary-installation - -Please read `the project readme`__ and `the installation documentation`__ for -more details. - -.. __: https://github.com/psycopg/psycopg#readme -.. __: https://www.psycopg.org/psycopg3/docs/basic/install.html - - -Copyright (C) 2020 The Psycopg Team diff --git a/psycopg_c/build_backend/cython_backend.py b/psycopg_c/build_backend/cython_backend.py deleted file mode 100644 index 686ecc746..000000000 --- a/psycopg_c/build_backend/cython_backend.py +++ /dev/null @@ -1,45 +0,0 @@ -""" -Build backend to build a Cython-based project only if needed. - -This backend adds a build dependency on Cython if pxd files are available, -otherwise it only relies on the c files to have been precompiled. -""" - -# Copyright (C) 2023 The Psycopg Team - -from __future__ import annotations - -import os -import sys -from typing import Any - -from setuptools import build_meta - -if sys.version_info >= (3, 11): - import tomllib -else: - import tomli as tomllib - - -def get_requires_for_build_wheel(config_settings: Any = None) -> list[str]: - if not os.path.exists("psycopg_c/_psycopg.pyx"): - # Cython files don't exist: we must be in a sdist and we can trust - # that the .c files we have packaged exist. - return [] - - # Cython files exists: we must be in a git checkout and we need Cython - # to build. Get the version from the pyproject itself to keep things in the - # same place. - with open("pyproject.toml", "rb") as f: - pyprj = tomllib.load(f) - - rv: list[str] = pyprj["cython-backend"]["cython-requires"] - return rv - - -get_requires_for_build_sdist = get_requires_for_build_wheel - -# For the rest, behave like the rest of setuptoos.build_meta -prepare_metadata_for_build_wheel = build_meta.prepare_metadata_for_build_wheel -build_wheel = build_meta.build_wheel -build_sdist = build_meta.build_sdist diff --git a/psycopg_c/build_backend/psycopg_build_ext.py b/psycopg_c/build_backend/psycopg_build_ext.py deleted file mode 100644 index c8bf35f79..000000000 --- a/psycopg_c/build_backend/psycopg_build_ext.py +++ /dev/null @@ -1,66 +0,0 @@ -""" -Build backend module for psycopg Cython components. - -Convert Cython to C if required, compile the C modules adding build from the -libpq and accounting for other platform differences. 
-""" - -# Copyright (C) 2024 The Psycopg Team - -import os -import sys -import subprocess as sp -from distutils import log -from distutils.command.build_ext import build_ext - - -def get_config(what: str) -> str: - pg_config = "pg_config" - try: - out = sp.run([pg_config, f"--{what}"], stdout=sp.PIPE, check=True) - except Exception as e: - log.error(f"couldn't run {pg_config!r} --{what}: %s", e) - raise - else: - return out.stdout.strip().decode() - - -class psycopg_build_ext(build_ext): - def finalize_options(self) -> None: - self._setup_ext_build() - super().finalize_options() - - def _setup_ext_build(self) -> None: - # Add include and lib dir for the libpq. - - # MSVC requires an explicit "libpq" - libpq = "pq" if sys.platform != "win32" else "libpq" - - for ext in self.distribution.ext_modules: - ext.libraries.append(libpq) - ext.include_dirs.append(get_config("includedir")) - ext.library_dirs.append(get_config("libdir")) - - if sys.platform == "win32": - # For __imp_htons and others - ext.libraries.append("ws2_32") - - # In the sdist there are not .pyx, only c, so we don't need Cython. - # Otherwise Cython is a requirement and it is used to compile pyx to c. - if os.path.exists("psycopg_c/_psycopg.pyx"): - from Cython.Build import cythonize # type: ignore - - for ext in self.distribution.ext_modules: - for i in range(len(ext.sources)): - base, fext = os.path.splitext(ext.sources[i]) - if fext == ".c" and os.path.exists(base + ".pyx"): - ext.sources[i] = base + ".pyx" - - self.distribution.ext_modules = cythonize( - self.distribution.ext_modules, - language_level=3, - compiler_directives={ - "always_allow_keywords": False, - }, - annotate=False, # enable to get an html view of the C module - ) diff --git a/psycopg_c/psycopg_c/.gitignore b/psycopg_c/psycopg_c/.gitignore deleted file mode 100644 index 36edb643b..000000000 --- a/psycopg_c/psycopg_c/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/*.so -_psycopg.c -pq.c -*.html diff --git a/psycopg_c/psycopg_c/__init__.py b/psycopg_c/psycopg_c/__init__.py deleted file mode 100644 index 14db92bc2..000000000 --- a/psycopg_c/psycopg_c/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -""" -psycopg -- PostgreSQL database adapter for Python -- C optimization package -""" - -# Copyright (C) 2020 The Psycopg Team - -import sys - -# This package shouldn't be imported before psycopg itself, or weird things -# will happen -if "psycopg" not in sys.modules: - raise ImportError("the psycopg package should be imported before psycopg_c") - -from .version import __version__ as __version__ # noqa diff --git a/psycopg_c/psycopg_c/_psycopg.pyi b/psycopg_c/psycopg_c/_psycopg.pyi deleted file mode 100644 index 498ee7d49..000000000 --- a/psycopg_c/psycopg_c/_psycopg.pyi +++ /dev/null @@ -1,83 +0,0 @@ -""" -Stub representation of the public objects exposed by the _psycopg module. - -TODO: this should be generated by mypy's stubgen but it crashes with no -information. Will submit a bug. -""" - -# Copyright (C) 2020 The Psycopg Team - -from __future__ import annotations - -from typing import Any, Sequence -from collections import deque - -from psycopg import BaseConnection, abc, pq -from psycopg.rows import Row, RowMaker -from psycopg.adapt import AdaptersMap, PyFormat -from psycopg.pq.abc import PGcancelConn, PGconn, PGresult - -class Transformer(abc.AdaptContext): - types: tuple[int, ...] | None - formats: list[pq.Format] | None - def __init__(self, context: abc.AdaptContext | None = None): ... 
- @classmethod - def from_context(cls, context: abc.AdaptContext | None) -> "Transformer": ... - @property - def connection(self) -> BaseConnection[Any] | None: ... - @property - def encoding(self) -> str: ... - @property - def adapters(self) -> AdaptersMap: ... - @property - def pgresult(self) -> PGresult | None: ... - def set_pgresult( - self, - result: "PGresult" | None, - *, - set_loaders: bool = True, - format: pq.Format | None = None, - ) -> None: ... - def set_dumper_types(self, types: Sequence[int], format: pq.Format) -> None: ... - def set_loader_types(self, types: Sequence[int], format: pq.Format) -> None: ... - def dump_sequence( - self, params: Sequence[Any], formats: Sequence[PyFormat] - ) -> Sequence[abc.Buffer | None]: ... - def as_literal(self, obj: Any) -> bytes: ... - def get_dumper(self, obj: Any, format: PyFormat) -> abc.Dumper: ... - def load_rows(self, row0: int, row1: int, make_row: RowMaker[Row]) -> list[Row]: ... - def load_row(self, row: int, make_row: RowMaker[Row]) -> Row | None: ... - def load_sequence(self, record: Sequence[abc.Buffer | None]) -> tuple[Any, ...]: ... - def get_loader(self, oid: int, format: pq.Format) -> abc.Loader: ... - -# Generators -def connect(conninfo: str, *, timeout: float = 0.0) -> abc.PQGenConn[PGconn]: ... -def cancel( - cancel_conn: PGcancelConn, *, timeout: float = 0.0 -) -> abc.PQGenConn[None]: ... -def execute(pgconn: PGconn) -> abc.PQGen[list[PGresult]]: ... -def send(pgconn: PGconn) -> abc.PQGen[None]: ... -def fetch_many(pgconn: PGconn) -> abc.PQGen[list[PGresult]]: ... -def fetch(pgconn: PGconn) -> abc.PQGen[PGresult | None]: ... -def pipeline_communicate( - pgconn: PGconn, commands: deque[abc.PipelineCommand] -) -> abc.PQGen[list[list[PGresult]]]: ... -def wait_c( - gen: abc.PQGen[abc.RV], fileno: int, interval: float | None = None -) -> abc.RV: ... - -# Copy support -def format_row_text( - row: Sequence[Any], tx: abc.Transformer, out: bytearray | None = None -) -> bytearray: ... -def format_row_binary( - row: Sequence[Any], tx: abc.Transformer, out: bytearray | None = None -) -> bytearray: ... -def parse_row_text(data: abc.Buffer, tx: abc.Transformer) -> tuple[Any, ...]: ... -def parse_row_binary(data: abc.Buffer, tx: abc.Transformer) -> tuple[Any, ...]: ... - -# Arrays optimization -def array_load_text( - data: abc.Buffer, loader: abc.Loader, delimiter: bytes = b"," -) -> list[Any]: ... -def array_load_binary(data: abc.Buffer, tx: abc.Transformer) -> list[Any]: ... diff --git a/psycopg_c/psycopg_c/_psycopg.pyx b/psycopg_c/psycopg_c/_psycopg.pyx deleted file mode 100644 index 731ba581c..000000000 --- a/psycopg_c/psycopg_c/_psycopg.pyx +++ /dev/null @@ -1,56 +0,0 @@ -""" -psycopg_c._psycopg optimization module. - -The module contains optimized C code used in preference to Python code -if a compiler is available. 
-""" - -# Copyright (C) 2020 The Psycopg Team - -from psycopg_c cimport pq -from psycopg_c.pq cimport libpq -from psycopg_c._psycopg cimport oids - -import logging - -from psycopg.pq import Format as _pq_Format -from psycopg._enums import PyFormat as _py_Format - -logger = logging.getLogger("psycopg") - -PQ_TEXT = _pq_Format.TEXT -PQ_BINARY = _pq_Format.BINARY - -PG_AUTO = _py_Format.AUTO -PG_TEXT = _py_Format.TEXT -PG_BINARY = _py_Format.BINARY - - -cdef extern from *: - """ -/* Include this early to avoid a warning about redefined ARRAYSIZE in winnt.h */ -#ifdef MS_WINDOWS -#define WIN32_LEAN_AND_MEAN -#include -#endif - -#ifndef ARRAYSIZE -#define ARRAYSIZE(a) ((sizeof(a) / sizeof(*(a)))) -#endif - """ - int ARRAYSIZE(void *array) - - -include "_psycopg/adapt.pyx" -include "_psycopg/copy.pyx" -include "_psycopg/generators.pyx" -include "_psycopg/transform.pyx" -include "_psycopg/waiting.pyx" - -include "types/array.pyx" -include "types/datetime.pyx" -include "types/numeric.pyx" -include "types/bool.pyx" -include "types/numpy.pyx" -include "types/string.pyx" -include "types/uuid.pyx" diff --git a/psycopg_c/psycopg_c/_psycopg/__init__.pxd b/psycopg_c/psycopg_c/_psycopg/__init__.pxd deleted file mode 100644 index db22deb73..000000000 --- a/psycopg_c/psycopg_c/_psycopg/__init__.pxd +++ /dev/null @@ -1,9 +0,0 @@ -""" -psycopg_c._psycopg cython module. - -This file is necessary to allow c-importing pxd files from this directory. -""" - -# Copyright (C) 2020 The Psycopg Team - -from psycopg_c._psycopg cimport oids diff --git a/psycopg_c/psycopg_c/_psycopg/adapt.pyx b/psycopg_c/psycopg_c/_psycopg/adapt.pyx deleted file mode 100644 index 5d38e7742..000000000 --- a/psycopg_c/psycopg_c/_psycopg/adapt.pyx +++ /dev/null @@ -1,169 +0,0 @@ -""" -C implementation of the adaptation system. - -This module maps each Python adaptation function to a C adaptation function. -Notice that C adaptation functions have a different signature because they can -avoid making a memory copy, however this makes impossible to expose them to -Python. - -This module exposes facilities to map the builtin adapters in python to -equivalent C implementations. - -""" - -# Copyright (C) 2020 The Psycopg Team - -from typing import Any - -cimport cython -from libc.string cimport memchr, memcpy -from cpython.bytearray cimport PyByteArray_AS_STRING, PyByteArray_FromStringAndSize -from cpython.bytearray cimport PyByteArray_GET_SIZE, PyByteArray_Resize - -from psycopg_c.pq cimport Escaping, _buffer_as_string_and_size - -from psycopg import errors as e - - -@cython.freelist(8) -cdef class CDumper: - - cdef readonly object cls - cdef pq.PGconn _pgconn - - oid = oids.INVALID_OID - - def __cinit__(self, cls, context: AdaptContext | None = None): - self.cls = cls - conn = context.connection if context is not None else None - self._pgconn = conn.pgconn if conn is not None else None - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - """Store the Postgres representation *obj* into *rv* at *offset* - - Return the number of bytes written to rv or -1 on Python exception. - - Subclasses must implement this method. The `dump()` implementation - transforms the result of this method to a bytearray so that it can be - returned to Python. - - The function interface allows C code to use this method automatically - to create larger buffers, e.g. for copy, composite objects, etc. 
- - Implementation note: as you will always need to make sure that rv - has enough space to include what you want to dump, `ensure_size()` - might probably come handy. - """ - raise NotImplementedError() - - def dump(self, obj) -> Buffer | None: - """Return the Postgres representation of *obj* as Python array of bytes""" - cdef rv = PyByteArray_FromStringAndSize("", 0) - cdef Py_ssize_t length = self.cdump(obj, rv, 0) - PyByteArray_Resize(rv, length) - return rv - - def quote(self, obj) -> Buffer: - cdef char *ptr - cdef char *ptr_out - cdef Py_ssize_t length - - value = self.dump(obj) - - if self._pgconn is not None: - esc = Escaping(self._pgconn) - # escaping and quoting - return esc.escape_literal(value) - - # This path is taken when quote is asked without a connection, - # usually it means by psycopg.sql.quote() or by - # 'Composible.as_string(None)'. Most often than not this is done by - # someone generating a SQL file to consume elsewhere. - - rv = PyByteArray_FromStringAndSize("", 0) - - # No quoting, only quote escaping, random bs escaping. See further. - esc = Escaping() - out = esc.escape_string(value) - - _buffer_as_string_and_size(out, &ptr, &length) - - if not memchr(ptr, b'\\', length): - # If the string has no backslash, the result is correct and we - # don't need to bother with standard_conforming_strings. - PyByteArray_Resize(rv, length + 2) # Must include the quotes - ptr_out = PyByteArray_AS_STRING(rv) - ptr_out[0] = b"'" - memcpy(ptr_out + 1, ptr, length) - ptr_out[length + 1] = b"'" - return rv - - # The libpq has a crazy behaviour: PQescapeString uses the last - # standard_conforming_strings setting seen on a connection. This - # means that backslashes might be escaped or might not. - # - # A syntax E'\\' works everywhere, whereas E'\' is an error. OTOH, - # if scs is off, '\\' raises a warning and '\' is an error. - # - # Check what the libpq does, and if it doesn't escape the backslash - # let's do it on our own. Never mind the race condition. - PyByteArray_Resize(rv, length + 4) # Must include " E'...'" quotes - ptr_out = PyByteArray_AS_STRING(rv) - ptr_out[0] = b" " - ptr_out[1] = b"E" - ptr_out[2] = b"'" - memcpy(ptr_out + 3, ptr, length) - ptr_out[length + 3] = b"'" - - if esc.escape_string(b"\\") == b"\\": - rv = bytes(rv).replace(b"\\", b"\\\\") - return rv - - cpdef object get_key(self, object obj, object format): - return self.cls - - cpdef object upgrade(self, object obj, object format): - return self - - @staticmethod - cdef char *ensure_size(bytearray ba, Py_ssize_t offset, Py_ssize_t size) except NULL: - """ - Grow *ba*, if necessary, to contains at least *size* bytes after *offset* - - Return the pointer in the bytearray at *offset*, i.e. the place where - you want to write *size* bytes. 
- """ - cdef Py_ssize_t curr_size = PyByteArray_GET_SIZE(ba) - cdef Py_ssize_t new_size = offset + size - if curr_size < new_size: - PyByteArray_Resize(ba, new_size) - - return PyByteArray_AS_STRING(ba) + offset - - -@cython.freelist(8) -cdef class CLoader: - cdef public libpq.Oid oid - cdef pq.PGconn _pgconn - - def __cinit__(self, libpq.Oid oid, context: AdaptContext | None = None): - self.oid = oid - conn = context.connection if context is not None else None - self._pgconn = conn.pgconn if conn is not None else None - - cdef object cload(self, const char *data, size_t length): - raise NotImplementedError() - - def load(self, object data) -> Any: - cdef char *ptr - cdef Py_ssize_t length - _buffer_as_string_and_size(data, &ptr, &length) - return self.cload(ptr, length) - - -cdef class _CRecursiveLoader(CLoader): - - cdef Transformer _tx - - def __cinit__(self, oid: int, context: AdaptContext | None = None): - self._tx = Transformer.from_context(context) diff --git a/psycopg_c/psycopg_c/_psycopg/copy.pyx b/psycopg_c/psycopg_c/_psycopg/copy.pyx deleted file mode 100644 index cf29252bf..000000000 --- a/psycopg_c/psycopg_c/_psycopg/copy.pyx +++ /dev/null @@ -1,365 +0,0 @@ -""" -C optimised functions for the copy system. - -""" - -# Copyright (C) 2020 The Psycopg Team - -from libc.stdint cimport int32_t, uint16_t, uint32_t -from libc.string cimport memcpy -from cpython.bytearray cimport PyByteArray_AS_STRING, PyByteArray_FromStringAndSize -from cpython.bytearray cimport PyByteArray_GET_SIZE, PyByteArray_Resize -from cpython.memoryview cimport PyMemoryView_FromObject - -from psycopg_c.pq cimport ViewBuffer -from psycopg_c._psycopg cimport endian - -from psycopg import errors as e - - -cdef int32_t _binary_null = -1 - - -def format_row_binary( - row: Sequence[Any], tx: Transformer, out: bytearray = None -) -> bytearray: - """Convert a row of adapted data to the data to send for binary copy""" - cdef Py_ssize_t rowlen = len(row) - cdef uint16_t berowlen = endian.htobe16(rowlen) - - cdef Py_ssize_t pos # offset in 'out' where to write - if out is None: - out = PyByteArray_FromStringAndSize("", 0) - pos = 0 - else: - pos = PyByteArray_GET_SIZE(out) - - # let's start from a nice chunk - # (larger than most fixed size; for variable ones, oh well, we'll resize it) - cdef char *target = CDumper.ensure_size( - out, pos, sizeof(berowlen) + 20 * rowlen) - - # Write the number of fields as network-order 16 bits - memcpy(target, &berowlen, sizeof(berowlen)) - pos += sizeof(berowlen) - - cdef Py_ssize_t size - cdef uint32_t besize - cdef char *buf - cdef int i - cdef PyObject *fmt = PG_BINARY - cdef PyObject *row_dumper - - if not tx._row_dumpers: - tx._row_dumpers = PyList_New(rowlen) - - dumpers = tx._row_dumpers - - for i in range(rowlen): - item = row[i] - if item is None: - _append_binary_none(out, &pos) - continue - - row_dumper = PyList_GET_ITEM(dumpers, i) - if not row_dumper: - row_dumper = tx.get_row_dumper(item, fmt) - Py_INCREF(row_dumper) - PyList_SET_ITEM(dumpers, i, row_dumper) - - if (row_dumper).cdumper is not None: - # A cdumper can resize if necessary and copy in place - size = (row_dumper).cdumper.cdump( - item, out, pos + sizeof(besize)) - # Also add the size of the item, before the item - besize = endian.htobe32(size) - target = PyByteArray_AS_STRING(out) # might have been moved by cdump - memcpy(target + pos, &besize, sizeof(besize)) - else: - # A Python dumper, gotta call it and extract its juices - b = PyObject_CallFunctionObjArgs( - (row_dumper).dumpfunc, item, NULL) - if b is 
None: - _append_binary_none(out, &pos) - continue - else: - _buffer_as_string_and_size(b, &buf, &size) - target = CDumper.ensure_size(out, pos, size + sizeof(besize)) - besize = endian.htobe32(size) - memcpy(target, &besize, sizeof(besize)) - memcpy(target + sizeof(besize), buf, size) - - pos += size + sizeof(besize) - - # Resize to the final size - PyByteArray_Resize(out, pos) - return out - - -cdef int _append_binary_none(bytearray out, Py_ssize_t *pos) except -1: - cdef char *target - target = CDumper.ensure_size(out, pos[0], sizeof(_binary_null)) - memcpy(target, &_binary_null, sizeof(_binary_null)) - pos[0] += sizeof(_binary_null) - return 0 - - -def format_row_text( - row: Sequence[Any], tx: Transformer, out: bytearray = None -) -> bytearray: - cdef Py_ssize_t pos # offset in 'out' where to write - if out is None: - out = PyByteArray_FromStringAndSize("", 0) - pos = 0 - else: - pos = PyByteArray_GET_SIZE(out) - - cdef Py_ssize_t rowlen = len(row) - - if rowlen == 0: - PyByteArray_Resize(out, pos + 1) - out[pos] = b"\n" - return out - - cdef Py_ssize_t size, tmpsize - cdef char *buf - cdef int i, j - cdef unsigned char *target - cdef int nesc - cdef int with_tab - cdef PyObject *fmt = PG_TEXT - cdef PyObject *row_dumper - - for i in range(rowlen): - # Include the tab before the data, so it gets included in the resizes - with_tab = i > 0 - - item = row[i] - if item is None: - _append_text_none(out, &pos, with_tab) - continue - - row_dumper = tx.get_row_dumper(item, fmt) - if (row_dumper).cdumper is not None: - # A cdumper can resize if necessary and copy in place - size = (row_dumper).cdumper.cdump( - item, out, pos + with_tab) - target = PyByteArray_AS_STRING(out) + pos - else: - # A Python dumper, gotta call it and extract its juices - b = PyObject_CallFunctionObjArgs( - (row_dumper).dumpfunc, item, NULL) - if b is None: - _append_text_none(out, &pos, with_tab) - continue - else: - _buffer_as_string_and_size(b, &buf, &size) - target = CDumper.ensure_size(out, pos, size + with_tab) - memcpy(target + with_tab, buf, size) - - # Prepend a tab to the data just written - if with_tab: - target[0] = b"\t" - target += 1 - pos += 1 - - # Now from pos to pos + size there is a textual representation: it may - # contain chars to escape. Scan to find how many such chars there are. - nesc = 0 - for j in range(size): - if copy_escape_lut[target[j]]: - nesc += 1 - - # If there is any char to escape, walk backwards pushing the chars - # forward and interspersing backslashes. 
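The branch below performs that escaping in place, walking the buffer backwards so each byte is moved exactly once. For reference, the substitution rules themselves restated in plain Python, mirroring the `copy_escape_lut` table at the end of this file (backslash and the control characters BS, TAB, LF, VT, FF and CR each become a backslash followed by a letter)::

    # Plain-Python restatement of the TEXT COPY escaping rules.
    _COPY_ESCAPES = {
        0x08: "\\b", 0x09: "\\t", 0x0A: "\\n", 0x0B: "\\v",
        0x0C: "\\f", 0x0D: "\\r", 0x5C: "\\\\",
    }

    def escape_copy_text(field: str) -> str:
        return field.translate(_COPY_ESCAPES)

    assert escape_copy_text("a\tb\\c") == "a\\tb\\\\c"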
- if nesc > 0: - tmpsize = size + nesc - target = CDumper.ensure_size(out, pos, tmpsize) - for j in range(size - 1, -1, -1): - if copy_escape_lut[target[j]]: - target[j + nesc] = copy_escape_lut[target[j]] - nesc -= 1 - target[j + nesc] = b"\\" - if nesc <= 0: - break - else: - target[j + nesc] = target[j] - pos += tmpsize - else: - pos += size - - # Resize to the final size, add the newline - PyByteArray_Resize(out, pos + 1) - out[pos] = b"\n" - return out - - -cdef int _append_text_none(bytearray out, Py_ssize_t *pos, int with_tab) except -1: - cdef char *target - - if with_tab: - target = CDumper.ensure_size(out, pos[0], 3) - memcpy(target, b"\t\\N", 3) - pos[0] += 3 - else: - target = CDumper.ensure_size(out, pos[0], 2) - memcpy(target, b"\\N", 2) - pos[0] += 2 - - return 0 - - -def parse_row_binary(data, tx: Transformer) -> tuple[Any, ...]: - cdef unsigned char *ptr - cdef Py_ssize_t bufsize - _buffer_as_string_and_size(data, &ptr, &bufsize) - cdef unsigned char *bufend = ptr + bufsize - - cdef uint16_t benfields - memcpy(&benfields, ptr, sizeof(benfields)) - cdef int nfields = endian.be16toh(benfields) - ptr += sizeof(benfields) - cdef list row = PyList_New(nfields) - - cdef int col - cdef int32_t belength - cdef Py_ssize_t length - - for col in range(nfields): - memcpy(&belength, ptr, sizeof(belength)) - ptr += sizeof(belength) - if belength == _binary_null: - field = None - else: - length = endian.be32toh(belength) - if ptr + length > bufend: - raise e.DataError("bad copy data: length exceeding data") - field = PyMemoryView_FromObject( - ViewBuffer._from_buffer(data, ptr, length)) - ptr += length - - Py_INCREF(field) - PyList_SET_ITEM(row, col, field) - - return tx.load_sequence(row) - - -def parse_row_text(data, tx: Transformer) -> tuple[Any, ...]: - cdef unsigned char *fstart - cdef Py_ssize_t size - _buffer_as_string_and_size(data, &fstart, &size) - - # politely assume that the number of fields will be what in the result - cdef int nfields = tx._nfields - cdef list row = PyList_New(nfields) - - cdef unsigned char *fend - cdef unsigned char *rowend = fstart + size - cdef unsigned char *src - cdef unsigned char *tgt - cdef int col - cdef int num_bs - - for col in range(nfields): - fend = fstart - num_bs = 0 - # Scan to the end of the field, remember if you see any backslash - while fend[0] != b'\t' and fend[0] != b'\n' and fend < rowend: - if fend[0] == b'\\': - num_bs += 1 - # skip the next char to avoid counting escaped backslashes twice - fend += 1 - fend += 1 - - # Check if we stopped for the right reason - if fend >= rowend: - raise e.DataError("bad copy data: field delimiter not found") - elif fend[0] == b'\t' and col == nfields - 1: - raise e.DataError("bad copy data: got a tab at the end of the row") - elif fend[0] == b'\n' and col != nfields - 1: - raise e.DataError( - "bad copy format: got a newline before the end of the row") - - # Is this a NULL? - if fend - fstart == 2 and fstart[0] == b'\\' and fstart[1] == b'N': - field = None - - # Is this a field with no backslash? 
- elif num_bs == 0: - # Nothing to unescape: we don't need a copy - field = PyMemoryView_FromObject( - ViewBuffer._from_buffer(data, fstart, fend - fstart)) - - # This is a field containing backslashes - else: - # We need a copy of the buffer to unescape - field = PyByteArray_FromStringAndSize("", 0) - PyByteArray_Resize(field, fend - fstart - num_bs) - tgt = PyByteArray_AS_STRING(field) - src = fstart - while (src < fend): - if src[0] != b'\\': - tgt[0] = src[0] - else: - src += 1 - tgt[0] = copy_unescape_lut[src[0]] - src += 1 - tgt += 1 - - Py_INCREF(field) - PyList_SET_ITEM(row, col, field) - - # Start of the field - fstart = fend + 1 - - # Convert the array of buffers into Python objects - return tx.load_sequence(row) - - -cdef extern from *: - """ -/* handle chars to (un)escape in text copy representation */ -/* '\b', '\t', '\n', '\v', '\f', '\r', '\\' */ - -/* Escaping chars */ -static const char copy_escape_lut[] = { - 0, 0, 0, 0, 0, 0, 0, 0, 98, 116, 110, 118, 102, 114, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 92, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -}; - -/* Conversion of escaped to unescaped chars */ -static const char copy_unescape_lut[] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - 96, 97, 8, 99, 100, 101, 12, 103, 104, 105, 106, 107, 108, 109, 10, 111, -112, 113, 13, 115, 9, 117, 11, 119, 120, 121, 122, 123, 124, 125, 126, 127, -128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, -144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, -160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, -176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, -192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, -208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, -224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, -240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, -}; - """ - const char[256] copy_escape_lut - const char[256] copy_unescape_lut diff --git a/psycopg_c/psycopg_c/_psycopg/endian.pxd b/psycopg_c/psycopg_c/_psycopg/endian.pxd deleted file mode 100644 index 09dcc93e7..000000000 --- a/psycopg_c/psycopg_c/_psycopg/endian.pxd +++ /dev/null @@ -1,158 +0,0 @@ -""" -Access to endian conversion function -""" - -# Copyright (C) 2020 The Psycopg Team - -from libc.stdint cimport uint16_t, uint32_t, uint64_t - - -cdef extern from * nogil: - # from 
https://gist.github.com/panzi/6856583 - # Improved in: - # https://github.com/linux-sunxi/sunxi-tools/blob/master/include/portable_endian.h - """ -// "License": Public Domain -// I, Mathias Panzenböck, place this file hereby into the public domain. Use it at your own risk for whatever you like. -// In case there are jurisdictions that don't support putting things in the public domain you can also consider it to -// be "dual licensed" under the BSD, MIT and Apache licenses, if you want to. This code is trivial anyway. Consider it -// an example on how to get the endian conversion functions on different platforms. - -#ifndef PORTABLE_ENDIAN_H__ -#define PORTABLE_ENDIAN_H__ - -#if (defined(_WIN16) || defined(_WIN32) || defined(_WIN64)) && !defined(__WINDOWS__) - -# define __WINDOWS__ - -#endif - -#if defined(__linux__) || defined(__CYGWIN__) - -# include - -#elif defined(__APPLE__) - -# include - -# define htobe16(x) OSSwapHostToBigInt16(x) -# define htole16(x) OSSwapHostToLittleInt16(x) -# define be16toh(x) OSSwapBigToHostInt16(x) -# define le16toh(x) OSSwapLittleToHostInt16(x) - -# define htobe32(x) OSSwapHostToBigInt32(x) -# define htole32(x) OSSwapHostToLittleInt32(x) -# define be32toh(x) OSSwapBigToHostInt32(x) -# define le32toh(x) OSSwapLittleToHostInt32(x) - -# define htobe64(x) OSSwapHostToBigInt64(x) -# define htole64(x) OSSwapHostToLittleInt64(x) -# define be64toh(x) OSSwapBigToHostInt64(x) -# define le64toh(x) OSSwapLittleToHostInt64(x) - -# define __BYTE_ORDER BYTE_ORDER -# define __BIG_ENDIAN BIG_ENDIAN -# define __LITTLE_ENDIAN LITTLE_ENDIAN -# define __PDP_ENDIAN PDP_ENDIAN - -#elif defined(__OpenBSD__) || defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__) - -# include - -/* For functions still missing, try to substitute 'historic' OpenBSD names */ -#ifndef be16toh -# define be16toh(x) betoh16(x) -#endif -#ifndef le16toh -# define le16toh(x) letoh16(x) -#endif -#ifndef be32toh -# define be32toh(x) betoh32(x) -#endif -#ifndef le32toh -# define le32toh(x) letoh32(x) -#endif -#ifndef be64toh -# define be64toh(x) betoh64(x) -#endif -#ifndef le64toh -# define le64toh(x) letoh64(x) -#endif - -#elif defined(__WINDOWS__) - -# include -# ifndef _MSC_VER -# include -# endif - -# if BYTE_ORDER == LITTLE_ENDIAN - -# define htobe16(x) htons(x) -# define htole16(x) (x) -# define be16toh(x) ntohs(x) -# define le16toh(x) (x) - -# define htobe32(x) htonl(x) -# define htole32(x) (x) -# define be32toh(x) ntohl(x) -# define le32toh(x) (x) - -# define htobe64(x) htonll(x) -# define htole64(x) (x) -# define be64toh(x) ntohll(x) -# define le64toh(x) (x) - -# elif BYTE_ORDER == BIG_ENDIAN - - /* that would be xbox 360 */ -# define htobe16(x) (x) -# define htole16(x) __builtin_bswap16(x) -# define be16toh(x) (x) -# define le16toh(x) __builtin_bswap16(x) - -# define htobe32(x) (x) -# define htole32(x) __builtin_bswap32(x) -# define be32toh(x) (x) -# define le32toh(x) __builtin_bswap32(x) - -# define htobe64(x) (x) -# define htole64(x) __builtin_bswap64(x) -# define be64toh(x) (x) -# define le64toh(x) __builtin_bswap64(x) - -# else - -# error byte order not supported - -# endif - -# define __BYTE_ORDER BYTE_ORDER -# define __BIG_ENDIAN BIG_ENDIAN -# define __LITTLE_ENDIAN LITTLE_ENDIAN -# define __PDP_ENDIAN PDP_ENDIAN - -#elif defined(__sun) -# include -#else - -# error platform not supported - -#endif - -#endif - """ - cdef uint16_t htobe16(uint16_t host_16bits) - cdef uint16_t htole16(uint16_t host_16bits) - cdef uint16_t be16toh(uint16_t big_endian_16bits) - cdef uint16_t 
le16toh(uint16_t little_endian_16bits) - - cdef uint32_t htobe32(uint32_t host_32bits) - cdef uint32_t htole32(uint32_t host_32bits) - cdef uint32_t be32toh(uint32_t big_endian_32bits) - cdef uint32_t le32toh(uint32_t little_endian_32bits) - - cdef uint64_t htobe64(uint64_t host_64bits) - cdef uint64_t htole64(uint64_t host_64bits) - cdef uint64_t be64toh(uint64_t big_endian_64bits) - cdef uint64_t le64toh(uint64_t little_endian_64bits) diff --git a/psycopg_c/psycopg_c/_psycopg/generators.pyx b/psycopg_c/psycopg_c/_psycopg/generators.pyx deleted file mode 100644 index 2e01592da..000000000 --- a/psycopg_c/psycopg_c/_psycopg/generators.pyx +++ /dev/null @@ -1,356 +0,0 @@ -""" -C implementation of generators for the communication protocols with the libpq -""" - -# Copyright (C) 2020 The Psycopg Team - -from cpython.object cimport PyObject_CallFunctionObjArgs - -from time import monotonic -from collections import deque - -from psycopg import errors as e -from psycopg.pq import abc -from psycopg.abc import PipelineCommand, PQGen -from psycopg._enums import Ready, Wait -from psycopg._encodings import conninfo_encoding - - -cdef object WAIT_W = Wait.W -cdef object WAIT_R = Wait.R -cdef object WAIT_RW = Wait.RW -cdef object PY_READY_NONE = Ready.NONE -cdef object PY_READY_R = Ready.R -cdef object PY_READY_W = Ready.W -cdef object PY_READY_RW = Ready.RW -cdef int READY_NONE = Ready.NONE -cdef int READY_R = Ready.R -cdef int READY_W = Ready.W -cdef int READY_RW = Ready.RW - -def connect(conninfo: str, *, timeout: float = 0.0) -> PQGenConn[abc.PGconn]: - """ - Generator to create a database connection without blocking. - """ - cdef pq.PGconn conn = pq.PGconn.connect_start(conninfo.encode()) - cdef libpq.PGconn *pgconn_ptr = conn._pgconn_ptr - cdef int conn_status = libpq.PQstatus(pgconn_ptr) - cdef int poll_status - cdef object wait, ready - cdef float deadline = 0.0 - - if timeout: - deadline = monotonic() + timeout - - while True: - if conn_status == libpq.CONNECTION_BAD: - encoding = conninfo_encoding(conninfo) - raise e.OperationalError( - f"connection is bad: {conn.get_error_message(encoding)}", - pgconn=conn - ) - - with nogil: - poll_status = libpq.PQconnectPoll(pgconn_ptr) - - if poll_status == libpq.PGRES_POLLING_READING \ - or poll_status == libpq.PGRES_POLLING_WRITING: - wait = WAIT_R if poll_status == libpq.PGRES_POLLING_READING else WAIT_W - while True: - ready = yield (libpq.PQsocket(pgconn_ptr), wait) - if deadline and monotonic() > deadline: - raise e.ConnectionTimeout("connection timeout expired") - if ready: - break - - elif poll_status == libpq.PGRES_POLLING_OK: - break - elif poll_status == libpq.PGRES_POLLING_FAILED: - encoding = conninfo_encoding(conninfo) - raise e.OperationalError( - f"connection failed: {conn.get_error_message(encoding)}", - pgconn=e.finish_pgconn(conn), - ) - else: - raise e.InternalError( - f"unexpected poll status: {poll_status}", - pgconn=e.finish_pgconn(conn), - ) - - conn.nonblocking = 1 - return conn - - -def cancel(pq.PGcancelConn cancel_conn, *, timeout: float = 0.0) -> PQGenConn[None]: - cdef libpq.PGcancelConn *pgcancelconn_ptr = cancel_conn.pgcancelconn_ptr - cdef int status - cdef float deadline = 0.0 - - if timeout: - deadline = monotonic() + timeout - - while True: - if deadline and monotonic() > deadline: - raise e.CancellationTimeout("cancellation timeout expired") - with nogil: - status = libpq.PQcancelPoll(pgcancelconn_ptr) - if status == libpq.PGRES_POLLING_OK: - break - elif status == libpq.PGRES_POLLING_READING: - yield 
libpq.PQcancelSocket(pgcancelconn_ptr), WAIT_R - elif status == libpq.PGRES_POLLING_WRITING: - yield libpq.PQcancelSocket(pgcancelconn_ptr), WAIT_W - elif status == libpq.PGRES_POLLING_FAILED: - raise e.OperationalError( - f"cancellation failed: {cancel_conn.get_error_message()}" - ) - else: - raise e.InternalError(f"unexpected poll status: {status}") - - -def execute(pq.PGconn pgconn) -> PQGen[list[abc.PGresult]]: - """ - Generator sending a query and returning results without blocking. - - The query must have already been sent using `pgconn.send_query()` or - similar. Flush the query and then return the result using nonblocking - functions. - - Return the list of results returned by the database (whether success - or error). - """ - yield from send(pgconn) - rv = yield from fetch_many(pgconn) - return rv - - -def send(pq.PGconn pgconn) -> PQGen[None]: - """ - Generator to send a query to the server without blocking. - - The query must have already been sent using `pgconn.send_query()` or - similar. Flush the query and then return the result using nonblocking - functions. - - After this generator has finished you may want to cycle using `fetch()` - to retrieve the results available. - """ - cdef libpq.PGconn *pgconn_ptr = pgconn._pgconn_ptr - cdef int ready - cdef int cires - - while True: - if pgconn.flush() == 0: - break - - while True: - ready = yield WAIT_RW - if ready: - break - - if ready & READY_R: - with nogil: - # This call may read notifies which will be saved in the - # PGconn buffer and passed to Python later. - cires = libpq.PQconsumeInput(pgconn_ptr) - if 1 != cires: - raise e.OperationalError( - f"consuming input failed: {pgconn.get_error_message()}") - - -def fetch_many(pq.PGconn pgconn) -> PQGen[list[PGresult]]: - """ - Generator retrieving results from the database without blocking. - - The query must have already been sent to the server, so pgconn.flush() has - already returned 0. - - Return the list of results returned by the database (whether success - or error). - """ - cdef list results = [] - cdef int status - cdef pq.PGresult result - cdef libpq.PGresult *pgres - - while True: - try: - result = yield from fetch(pgconn) - except e.DatabaseError: - # What might have happened here is that a previuos error - # disconnected the connection, for example a idle in transaction - # timeout. Check if we had received an error before, and raise it - # as exception, because it should contain more details. See #988. - if any(result.status == libpq.PGRES_FATAL_ERROR for res in results): - break - else: - raise - - if result is None: - break - - results.append(result) - pgres = result._pgresult_ptr - - status = libpq.PQresultStatus(pgres) - if ( - status == libpq.PGRES_COPY_IN - or status == libpq.PGRES_COPY_OUT - or status == libpq.PGRES_COPY_BOTH - ): - # After entering copy mode the libpq will create a phony result - # for every request so let's break the endless loop. - break - - if status == libpq.PGRES_PIPELINE_SYNC: - # PIPELINE_SYNC is not followed by a NULL, but we return it alone - # similarly to other result sets. - break - - return results - - -def fetch(pq.PGconn pgconn) -> PQGen[PGresult | None]: - """ - Generator retrieving a single result from the database without blocking. - - The query must have already been sent to the server, so pgconn.flush() has - already returned 0. - - Return a result from the database (whether success or error). 
- """ - cdef libpq.PGconn *pgconn_ptr = pgconn._pgconn_ptr - cdef int cires, ibres - cdef libpq.PGresult *pgres - cdef object ready - - with nogil: - ibres = libpq.PQisBusy(pgconn_ptr) - if ibres: - while True: - ready = yield WAIT_R - if ready: - break - - while True: - with nogil: - cires = libpq.PQconsumeInput(pgconn_ptr) - if cires == 1: - ibres = libpq.PQisBusy(pgconn_ptr) - - if 1 != cires: - raise e.OperationalError( - f"consuming input failed: {pgconn.get_error_message()}") - if not ibres: - break - while True: - ready = yield WAIT_R - if ready: - break - - _consume_notifies(pgconn) - - with nogil: - pgres = libpq.PQgetResult(pgconn_ptr) - if pgres is NULL: - return None - return pq.PGresult._from_ptr(pgres) - - -def pipeline_communicate( - pq.PGconn pgconn, commands: deque[PipelineCommand] -) -> PQGen[list[list[PGresult]]]: - """Generator to send queries from a connection in pipeline mode while also - receiving results. - - Return a list results, including single PIPELINE_SYNC elements. - """ - cdef libpq.PGconn *pgconn_ptr = pgconn._pgconn_ptr - cdef int cires - cdef int status - cdef int ready - cdef libpq.PGresult *pgres - cdef list res = [] - cdef list results = [] - cdef pq.PGresult r - - while True: - while True: - ready = yield WAIT_RW - if ready: - break - - if ready & READY_R: - with nogil: - cires = libpq.PQconsumeInput(pgconn_ptr) - if 1 != cires: - raise e.OperationalError( - f"consuming input failed: {pgconn.get_error_message()}") - - _consume_notifies(pgconn) - - res: list[PGresult] = [] - while True: - with nogil: - ibres = libpq.PQisBusy(pgconn_ptr) - if ibres: - break - pgres = libpq.PQgetResult(pgconn_ptr) - - if pgres is NULL: - if not res: - break - results.append(res) - res = [] - else: - status = libpq.PQresultStatus(pgres) - r = pq.PGresult._from_ptr(pgres) - if status == libpq.PGRES_PIPELINE_SYNC: - results.append([r]) - elif ( - status == libpq.PGRES_COPY_IN - or status == libpq.PGRES_COPY_OUT - or status == libpq.PGRES_COPY_BOTH - ): - # This shouldn't happen, but insisting hard enough, it will. - # For instance, in test_executemany_badquery(), with the COPY - # statement and the AsyncClientCursor, which disables - # prepared statements). - # Bail out from the resulting infinite loop. - raise e.NotSupportedError( - "COPY cannot be used in pipeline mode" - ) - else: - res.append(r) - - if ready & READY_W: - pgconn.flush() - if not commands: - break - commands.popleft()() - - return results - - -cdef int _consume_notifies(pq.PGconn pgconn) except -1: - cdef object notify_handler = pgconn.notify_handler - cdef libpq.PGconn *pgconn_ptr - cdef libpq.PGnotify *notify - - if notify_handler is not None: - while True: - pynotify = pgconn.notifies() - if pynotify is None: - break - PyObject_CallFunctionObjArgs( - notify_handler, pynotify, NULL - ) - else: - pgconn_ptr = pgconn._pgconn_ptr - while True: - notify = libpq.PQnotifies(pgconn_ptr) - if notify is NULL: - break - libpq.PQfreemem(notify) - - return 0 diff --git a/psycopg_c/psycopg_c/_psycopg/oids.pxd b/psycopg_c/psycopg_c/_psycopg/oids.pxd deleted file mode 100644 index ca12ac179..000000000 --- a/psycopg_c/psycopg_c/_psycopg/oids.pxd +++ /dev/null @@ -1,93 +0,0 @@ -""" -Constants to refer to OIDS in C -""" - -# Copyright (C) 2020 The Psycopg Team - -# Use tools/update_oids.py to update this data. 
- -cdef enum: - INVALID_OID = 0 - - # autogenerated: start - - # Generated from PostgreSQL 17.0 - - ACLITEM_OID = 1033 - BIT_OID = 1560 - BOOL_OID = 16 - BOX_OID = 603 - BPCHAR_OID = 1042 - BYTEA_OID = 17 - CHAR_OID = 18 - CID_OID = 29 - CIDR_OID = 650 - CIRCLE_OID = 718 - DATE_OID = 1082 - DATEMULTIRANGE_OID = 4535 - DATERANGE_OID = 3912 - FLOAT4_OID = 700 - FLOAT8_OID = 701 - GTSVECTOR_OID = 3642 - INET_OID = 869 - INT2_OID = 21 - INT2VECTOR_OID = 22 - INT4_OID = 23 - INT4MULTIRANGE_OID = 4451 - INT4RANGE_OID = 3904 - INT8_OID = 20 - INT8MULTIRANGE_OID = 4536 - INT8RANGE_OID = 3926 - INTERVAL_OID = 1186 - JSON_OID = 114 - JSONB_OID = 3802 - JSONPATH_OID = 4072 - LINE_OID = 628 - LSEG_OID = 601 - MACADDR_OID = 829 - MACADDR8_OID = 774 - MONEY_OID = 790 - NAME_OID = 19 - NUMERIC_OID = 1700 - NUMMULTIRANGE_OID = 4532 - NUMRANGE_OID = 3906 - OID_OID = 26 - OIDVECTOR_OID = 30 - PATH_OID = 602 - PG_LSN_OID = 3220 - POINT_OID = 600 - POLYGON_OID = 604 - RECORD_OID = 2249 - REFCURSOR_OID = 1790 - REGCLASS_OID = 2205 - REGCOLLATION_OID = 4191 - REGCONFIG_OID = 3734 - REGDICTIONARY_OID = 3769 - REGNAMESPACE_OID = 4089 - REGOPER_OID = 2203 - REGOPERATOR_OID = 2204 - REGPROC_OID = 24 - REGPROCEDURE_OID = 2202 - REGROLE_OID = 4096 - REGTYPE_OID = 2206 - TEXT_OID = 25 - TID_OID = 27 - TIME_OID = 1083 - TIMESTAMP_OID = 1114 - TIMESTAMPTZ_OID = 1184 - TIMETZ_OID = 1266 - TSMULTIRANGE_OID = 4533 - TSQUERY_OID = 3615 - TSRANGE_OID = 3908 - TSTZMULTIRANGE_OID = 4534 - TSTZRANGE_OID = 3910 - TSVECTOR_OID = 3614 - TXID_SNAPSHOT_OID = 2970 - UUID_OID = 2950 - VARBIT_OID = 1562 - VARCHAR_OID = 1043 - XID_OID = 28 - XID8_OID = 5069 - XML_OID = 142 - - # autogenerated: end diff --git a/psycopg_c/psycopg_c/_psycopg/transform.pyx b/psycopg_c/psycopg_c/_psycopg/transform.pyx deleted file mode 100644 index 3a5fc9a87..000000000 --- a/psycopg_c/psycopg_c/_psycopg/transform.pyx +++ /dev/null @@ -1,638 +0,0 @@ -""" -Helper object to transform values between Python and PostgreSQL - -Cython implementation: can access to lower level C features without creating -too many temporary Python objects and performing less memory copying. - -""" - -# Copyright (C) 2020 The Psycopg Team - -cimport cython -from cpython.ref cimport Py_DECREF, Py_INCREF -from cpython.set cimport PySet_Add, PySet_Contains -from cpython.dict cimport PyDict_GetItem, PyDict_SetItem -from cpython.list cimport PyList_CheckExact, PyList_GET_ITEM, PyList_GET_SIZE -from cpython.list cimport PyList_New, PyList_SET_ITEM -from cpython.bytes cimport PyBytes_AS_STRING -from cpython.tuple cimport PyTuple_New, PyTuple_SET_ITEM -from cpython.object cimport PyObject, PyObject_CallFunctionObjArgs - -from typing import Any, Iterable, Sequence - -from psycopg import errors as e -from psycopg.pq import Format as PqFormat -from psycopg.rows import Row, RowMaker -from psycopg._encodings import conn_encoding - -NoneType = type(None) - -# internal structure: you are not supposed to know this. But it's worth some -# 10% of the innermost loop, so I'm willing to ask for forgiveness later... 
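The struct declarations that follow mirror the head of libpq's private result layout, letting the transformer read each cell's length and value pointer directly instead of paying a `PQgetvalue()`/`PQgetlength()` call per cell. For contrast, a sketch of the portable accessor path this bypasses, written against the `pq` wrapper layer and assuming it keeps the psycopg-style `pgconn.exec_()`/`get_value()` interface::

    import gaussdb

    with gaussdb.connect("dbname=test") as conn:
        res = conn.pgconn.exec_(b"SELECT 1, NULL")
        # One wrapper call per cell: the overhead the struct peeking avoids.
        print(res.get_value(0, 0))  # b"1"
        print(res.get_value(0, 1))  # None for SQL NULL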
- -ctypedef struct PGresAttValue: - int len - char *value - -ctypedef struct pg_result_int: - # NOTE: it would be advised that we don't know this structure's content - int ntups - int numAttributes - libpq.PGresAttDesc *attDescs - PGresAttValue **tuples - # ...more members, which we ignore - - -@cython.freelist(16) -cdef class RowLoader: - cdef CLoader cloader - cdef object pyloader - cdef object loadfunc - - -@cython.freelist(16) -cdef class RowDumper: - cdef CDumper cdumper - cdef object pydumper - cdef object dumpfunc - cdef object oid - cdef object format - - -cdef class Transformer: - """ - An object that can adapt efficiently between Python and PostgreSQL. - - The life cycle of the object is the query, so it is assumed that attributes - such as the server version or the connection encoding will not change. The - object have its state so adapting several values of the same type can be - optimised. - - """ - - cdef readonly object connection - cdef readonly object adapters - cdef readonly object types - cdef readonly object formats - cdef str _encoding - cdef int _none_oid - - # mapping class -> Dumper instance (auto, text, binary) - cdef dict _auto_dumpers - cdef dict _text_dumpers - cdef dict _binary_dumpers - - # mapping oid -> Loader instance (text, binary) - cdef dict _text_loaders - cdef dict _binary_loaders - - # mapping oid -> Dumper instance (text, binary) - cdef dict _oid_text_dumpers - cdef dict _oid_binary_dumpers - - cdef pq.PGresult _pgresult - cdef int _nfields, _ntuples - cdef list _row_dumpers - cdef list _row_loaders - - cdef dict _oid_types - - def __cinit__(self, context: "AdaptContext" | None = None): - if context is not None: - self.adapters = context.adapters - self.connection = context.connection - else: - from psycopg import postgres - self.adapters = postgres.adapters - self.connection = None - - self.types = self.formats = None - self._none_oid = -1 - - @classmethod - def from_context(cls, context: "AdaptContext" | None): - """ - Return a Transformer from an AdaptContext. - - If the context is a Transformer instance, just return it. 
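
A rough pure-Python analogue (hypothetical names, not this module's API) of the per-query caching the Transformer performs: one dumper instance is created and then reused for every value of the same (Python type, format) pair:

    class MiniTransformer:
        def __init__(self, get_dumper):
            self._get_dumper = get_dumper  # factory: (type, fmt) -> dumper
            self._cache = {}               # (type, fmt) -> dumper instance

        def dump(self, obj, fmt="t"):
            key = (type(obj), fmt)
            dumper = self._cache.get(key)
            if dumper is None:
                dumper = self._cache[key] = self._get_dumper(type(obj), fmt)
            return dumper(obj)

    tx = MiniTransformer(lambda t, f: (lambda o: str(o).encode()))
    assert tx.dump(42) == b"42"
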
- """ - return _tx_from_context(context) - - @property - def encoding(self) -> str: - if not self._encoding: - self._encoding = conn_encoding(self.connection) - return self._encoding - - @property - def pgresult(self) -> PGresult | None: - return self._pgresult - - cpdef set_pgresult( - self, - pq.PGresult result, - object set_loaders = True, - object format = None - ): - self._pgresult = result - - if result is None: - self._nfields = self._ntuples = 0 - if set_loaders: - self._row_loaders = [] - return - - cdef libpq.PGresult *res = self._pgresult._pgresult_ptr - self._nfields = libpq.PQnfields(res) - self._ntuples = libpq.PQntuples(res) - - if not set_loaders: - return - - if not self._nfields: - self._row_loaders = [] - return - - if format is None: - format = libpq.PQfformat(res, 0) - - cdef list loaders = PyList_New(self._nfields) - cdef PyObject *row_loader - cdef object oid - - cdef int i - for i in range(self._nfields): - oid = libpq.PQftype(res, i) - row_loader = self._c_get_loader(oid, format) - Py_INCREF(row_loader) - PyList_SET_ITEM(loaders, i, row_loader) - - self._row_loaders = loaders - - def set_dumper_types(self, types: Sequence[int], format: PqFormat) -> None: - cdef Py_ssize_t ntypes = len(types) - dumpers = PyList_New(ntypes) - cdef int i - for i in range(ntypes): - oid = types[i] - dumper_ptr = self.get_dumper_by_oid( - oid, format) - Py_INCREF(dumper_ptr) - PyList_SET_ITEM(dumpers, i, dumper_ptr) - - self._row_dumpers = dumpers - self.types = tuple(types) - self.formats = [format] * ntypes - - def set_loader_types(self, types: Sequence[int], format: PqFormat) -> None: - self._c_loader_types(len(types), types, format) - - cdef void _c_loader_types(self, Py_ssize_t ntypes, list types, object format): - cdef list loaders = PyList_New(ntypes) - - # these are used more as Python object than C - cdef PyObject *oid - cdef PyObject *row_loader - for i in range(ntypes): - oid = PyList_GET_ITEM(types, i) - row_loader = self._c_get_loader(oid, format) - Py_INCREF(row_loader) - PyList_SET_ITEM(loaders, i, row_loader) - - self._row_loaders = loaders - - cpdef as_literal(self, obj): - cdef PyObject *row_dumper = self.get_row_dumper( - obj, PG_TEXT) - - if (row_dumper).cdumper is not None: - dumper = (row_dumper).cdumper - else: - dumper = (row_dumper).pydumper - - rv = dumper.quote(obj) - oid = dumper.oid - # If the result is quoted and the oid not unknown or text, - # add an explicit type cast. - # Check the last char because the first one might be 'E'. - if oid and oid != oids.TEXT_OID and rv and rv[-1] == 39: - if self._oid_types is None: - self._oid_types = {} - type_ptr = PyDict_GetItem(self._oid_types, oid) - if type_ptr == NULL: - type_sql = b"" - ti = self.adapters.types.get(oid) - if ti is not None: - if oid < 8192: - # builtin: prefer "timestamptz" to "timestamp with time zone" - type_sql = ti.name.encode(self.encoding) - else: - type_sql = ti.regtype.encode(self.encoding) - if oid == ti.array_oid: - type_sql += b"[]" - - type_ptr = type_sql - PyDict_SetItem(self._oid_types, oid, type_sql) - - if type_ptr: - rv = b"%s::%s" % (rv, type_ptr) - - return rv - - def get_dumper(self, obj, format) -> "Dumper": - cdef PyObject *row_dumper = self.get_row_dumper( - obj, format) - return (row_dumper).pydumper - - cdef PyObject *get_row_dumper(self, PyObject *obj, PyObject *fmt) except NULL: - """ - Return a borrowed reference to the RowDumper for the given obj/fmt. 
- """ - # Fast path: return a Dumper class already instantiated from the same type - cdef PyObject *cache - cdef PyObject *ptr - cdef PyObject *ptr1 - cdef RowDumper row_dumper - - # Normally, the type of the object dictates how to dump it - key = type(obj) - - # Establish where would the dumper be cached - bfmt = PyUnicode_AsUTF8String(fmt) - cdef char cfmt = PyBytes_AS_STRING(bfmt)[0] - if cfmt == b's': - if self._auto_dumpers is None: - self._auto_dumpers = {} - cache = self._auto_dumpers - elif cfmt == b'b': - if self._binary_dumpers is None: - self._binary_dumpers = {} - cache = self._binary_dumpers - elif cfmt == b't': - if self._text_dumpers is None: - self._text_dumpers = {} - cache = self._text_dumpers - else: - raise ValueError( - f"format should be a psycopg.adapt.Format, not {fmt}") - - # Reuse an existing Dumper class for objects of the same type - ptr = PyDict_GetItem(cache, key) - if ptr == NULL: - dcls = PyObject_CallFunctionObjArgs( - self.adapters.get_dumper, key, fmt, NULL) - dumper = PyObject_CallFunctionObjArgs( - dcls, key, self, NULL) - - row_dumper = _as_row_dumper(dumper) - PyDict_SetItem(cache, key, row_dumper) - ptr = row_dumper - - # Check if the dumper requires an upgrade to handle this specific value - if (ptr).cdumper is not None: - key1 = (ptr).cdumper.get_key(obj, fmt) - else: - key1 = PyObject_CallFunctionObjArgs( - (ptr).pydumper.get_key, obj, fmt, NULL) - if key1 is key: - return ptr - - # If it does, ask the dumper to create its own upgraded version - ptr1 = PyDict_GetItem(cache, key1) - if ptr1 != NULL: - return ptr1 - - if (ptr).cdumper is not None: - dumper = (ptr).cdumper.upgrade(obj, fmt) - else: - dumper = PyObject_CallFunctionObjArgs( - (ptr).pydumper.upgrade, obj, fmt, NULL) - - row_dumper = _as_row_dumper(dumper) - PyDict_SetItem(cache, key1, row_dumper) - return row_dumper - - cdef PyObject *get_dumper_by_oid(self, PyObject *oid, PyObject *fmt) except NULL: - """ - Return a borrowed reference to the RowDumper for the given oid/fmt. 
- """ - cdef PyObject *ptr - cdef PyObject *cache - cdef RowDumper row_dumper - - # Establish where would the dumper be cached - cdef int cfmt = fmt - if cfmt == 0: - if self._oid_text_dumpers is None: - self._oid_text_dumpers = {} - cache = self._oid_text_dumpers - elif cfmt == 1: - if self._oid_binary_dumpers is None: - self._oid_binary_dumpers = {} - cache = self._oid_binary_dumpers - else: - raise ValueError( - f"format should be a psycopg.pq.Format, not {fmt}") - - # Reuse an existing Dumper class for objects of the same type - ptr = PyDict_GetItem(cache, oid) - if ptr == NULL: - dcls = PyObject_CallFunctionObjArgs( - self.adapters.get_dumper_by_oid, oid, fmt, NULL) - dumper = PyObject_CallFunctionObjArgs( - dcls, NoneType, self, NULL) - - row_dumper = _as_row_dumper(dumper) - PyDict_SetItem(cache, oid, row_dumper) - ptr = row_dumper - - return ptr - - cpdef dump_sequence(self, object params, object formats): - # Verify that they are not none and that PyList_GET_ITEM won't blow up - cdef Py_ssize_t nparams = len(params) - cdef list out = PyList_New(nparams) - - cdef int i - cdef PyObject *dumper_ptr # borrowed pointer to row dumper - cdef object dumped - cdef Py_ssize_t size - - if self._none_oid < 0: - self._none_oid = self.adapters.get_dumper(NoneType, "s").oid - - dumpers = self._row_dumpers - - if dumpers: - for i in range(nparams): - param = params[i] - if param is not None: - dumper_ptr = PyList_GET_ITEM(dumpers, i) - if (dumper_ptr).cdumper is not None: - dumped = PyByteArray_FromStringAndSize("", 0) - size = (dumper_ptr).cdumper.cdump( - param, dumped, 0) - PyByteArray_Resize(dumped, size) - else: - dumped = PyObject_CallFunctionObjArgs( - (dumper_ptr).dumpfunc, - param, NULL) - else: - dumped = None - - Py_INCREF(dumped) - PyList_SET_ITEM(out, i, dumped) - - return out - - cdef tuple types = PyTuple_New(nparams) - cdef list pqformats = PyList_New(nparams) - - for i in range(nparams): - param = params[i] - if param is not None: - dumper_ptr = self.get_row_dumper( - param, formats[i]) - if (dumper_ptr).cdumper is not None: - dumped = PyByteArray_FromStringAndSize("", 0) - size = (dumper_ptr).cdumper.cdump( - param, dumped, 0) - PyByteArray_Resize(dumped, size) - else: - dumped = PyObject_CallFunctionObjArgs( - (dumper_ptr).dumpfunc, - param, NULL) - oid = (dumper_ptr).oid - fmt = (dumper_ptr).format - else: - dumped = None - oid = self._none_oid - fmt = PQ_TEXT - - Py_INCREF(dumped) - PyList_SET_ITEM(out, i, dumped) - - Py_INCREF(oid) - PyTuple_SET_ITEM(types, i, oid) - - Py_INCREF(fmt) - PyList_SET_ITEM(pqformats, i, fmt) - - self.types = types - self.formats = pqformats - return out - - def load_rows(self, int row0, int row1, object make_row) -> list[Row]: - if self._pgresult is None: - raise e.InterfaceError("result not set") - - if not (0 <= row0 <= self._ntuples and 0 <= row1 <= self._ntuples): - raise e.InterfaceError( - f"rows must be included between 0 and {self._ntuples}" - ) - - cdef libpq.PGresult *res = self._pgresult._pgresult_ptr - # cheeky access to the internal PGresult structure - cdef pg_result_int *ires = res - - cdef int row - cdef int col - cdef PGresAttValue *attval - cdef object record # not 'tuple' as it would check on assignment - - cdef object records = PyList_New(row1 - row0) - for row in range(row0, row1): - record = PyTuple_New(self._nfields) - Py_INCREF(record) - PyList_SET_ITEM(records, row - row0, record) - - cdef PyObject *loader # borrowed RowLoader - cdef PyObject *brecord # borrowed - row_loaders = self._row_loaders # avoid an incref/decref 
per item - - for col in range(self._nfields): - loader = PyList_GET_ITEM(row_loaders, col) - if (loader).cloader is not None: - for row in range(row0, row1): - brecord = PyList_GET_ITEM(records, row - row0) - attval = &(ires.tuples[row][col]) - if attval.len == -1: # NULL_LEN - pyval = None - else: - pyval = (loader).cloader.cload( - attval.value, attval.len) - - Py_INCREF(pyval) - PyTuple_SET_ITEM(brecord, col, pyval) - - else: - for row in range(row0, row1): - brecord = PyList_GET_ITEM(records, row - row0) - attval = &(ires.tuples[row][col]) - if attval.len == -1: # NULL_LEN - pyval = None - else: - b = PyMemoryView_FromObject( - ViewBuffer._from_buffer( - self._pgresult, - attval.value, attval.len)) - pyval = PyObject_CallFunctionObjArgs( - (loader).loadfunc, b, NULL) - - Py_INCREF(pyval) - PyTuple_SET_ITEM(brecord, col, pyval) - - if make_row is not tuple: - for i in range(row1 - row0): - brecord = PyList_GET_ITEM(records, i) - record = PyObject_CallFunctionObjArgs( - make_row, brecord, NULL) - Py_INCREF(record) - PyList_SET_ITEM(records, i, record) - Py_DECREF(brecord) - return records - - def load_row(self, int row, object make_row) -> Row | None: - if self._pgresult is None: - return None - - if not 0 <= row < self._ntuples: - return None - - cdef libpq.PGresult *res = self._pgresult._pgresult_ptr - # cheeky access to the internal PGresult structure - cdef pg_result_int *ires = res - - cdef PyObject *loader # borrowed RowLoader - cdef int col - cdef PGresAttValue *attval - cdef object record # not 'tuple' as it would check on assignment - - record = PyTuple_New(self._nfields) - row_loaders = self._row_loaders # avoid an incref/decref per item - - for col in range(self._nfields): - attval = &(ires.tuples[row][col]) - if attval.len == -1: # NULL_LEN - pyval = None - else: - loader = PyList_GET_ITEM(row_loaders, col) - if (loader).cloader is not None: - pyval = (loader).cloader.cload( - attval.value, attval.len) - else: - b = PyMemoryView_FromObject( - ViewBuffer._from_buffer( - self._pgresult, - attval.value, attval.len)) - pyval = PyObject_CallFunctionObjArgs( - (loader).loadfunc, b, NULL) - - Py_INCREF(pyval) - PyTuple_SET_ITEM(record, col, pyval) - - if make_row is not tuple: - record = PyObject_CallFunctionObjArgs( - make_row, record, NULL) - return record - - cpdef object load_sequence(self, record: Sequence[Buffer | None]): - cdef Py_ssize_t nfields = len(record) - out = PyTuple_New(nfields) - cdef PyObject *loader # borrowed RowLoader - cdef int col - cdef char *ptr - cdef Py_ssize_t size - - row_loaders = self._row_loaders # avoid an incref/decref per item - if PyList_GET_SIZE(row_loaders) != nfields: - raise e.ProgrammingError( - f"cannot load sequence of {nfields} items:" - f" {len(self._row_loaders)} loaders registered") - - for col in range(nfields): - item = record[col] - if item is None: - Py_INCREF(None) - PyTuple_SET_ITEM(out, col, None) - continue - - loader = PyList_GET_ITEM(row_loaders, col) - if (loader).cloader is not None: - _buffer_as_string_and_size(item, &ptr, &size) - pyval = (loader).cloader.cload(ptr, size) - else: - pyval = PyObject_CallFunctionObjArgs( - (loader).loadfunc, item, NULL) - - Py_INCREF(pyval) - PyTuple_SET_ITEM(out, col, pyval) - - return out - - def get_loader(self, oid: int, format: PqFormat) -> "Loader": - cdef PyObject *row_loader = self._c_get_loader( - oid, format) - return (row_loader).pyloader - - cdef PyObject *_c_get_loader(self, PyObject *oid, PyObject *fmt) except NULL: - """ - Return a borrowed reference to the RowLoader instance 
for given oid/fmt.
-        """
-        cdef PyObject *ptr
-        cdef PyObject *cache
-
-        if fmt == PQ_TEXT:
-            if self._text_loaders is None:
-                self._text_loaders = {}
-            cache = self._text_loaders
-        elif fmt == PQ_BINARY:
-            if self._binary_loaders is None:
-                self._binary_loaders = {}
-            cache = self._binary_loaders
-        else:
-            raise ValueError(
-                f"format should be a psycopg.pq.Format, not {format}")
-
-        ptr = PyDict_GetItem(cache, oid)
-        if ptr != NULL:
-            return ptr
-
-        loader_cls = self.adapters.get_loader(oid, fmt)
-        if loader_cls is None:
-            loader_cls = self.adapters.get_loader(oids.INVALID_OID, fmt)
-            if loader_cls is None:
-                raise e.InterfaceError("unknown oid loader not found")
-
-        loader = PyObject_CallFunctionObjArgs(
-            loader_cls, oid, self, NULL)
-
-        cdef RowLoader row_loader = RowLoader()
-        row_loader.pyloader = loader
-        row_loader.loadfunc = loader.load
-        if isinstance(loader, CLoader):
-            row_loader.cloader = loader
-
-        PyDict_SetItem(cache, oid, row_loader)
-        return <PyObject *>row_loader
-
-
-cdef object _as_row_dumper(object dumper):
-    cdef RowDumper row_dumper = RowDumper()
-
-    row_dumper.pydumper = dumper
-    row_dumper.dumpfunc = dumper.dump
-    row_dumper.oid = dumper.oid
-    row_dumper.format = dumper.format
-
-    if isinstance(dumper, CDumper):
-        row_dumper.cdumper = dumper
-
-    return row_dumper
-
-
-cdef Transformer _tx_from_context(object context):
-    if isinstance(context, Transformer):
-        return context
-    else:
-        return Transformer(context)
diff --git a/psycopg_c/psycopg_c/_psycopg/waiting.pyx b/psycopg_c/psycopg_c/_psycopg/waiting.pyx
deleted file mode 100644
index ed04bb472..000000000
--- a/psycopg_c/psycopg_c/_psycopg/waiting.pyx
+++ /dev/null
@@ -1,217 +0,0 @@
-"""
-C implementation of waiting functions
-"""
-
-# Copyright (C) 2022 The Psycopg Team
-
-from cpython.object cimport PyObject_CallFunctionObjArgs
-from typing import TypeVar
-
-RV = TypeVar("RV")
-
-
-cdef extern from *:
-    """
-#if defined(HAVE_POLL) && !defined(HAVE_BROKEN_POLL)
-
-#if defined(HAVE_POLL_H)
-#include <poll.h>
-#elif defined(HAVE_SYS_POLL_H)
-#include <sys/poll.h>
-#endif
-
-#else /* no poll available */
-
-#ifdef MS_WINDOWS
-#include <winsock2.h>
-#else
-#include <sys/select.h>
-#endif
-
-#endif /* HAVE_POLL */
-
-#define SELECT_EV_READ 1
-#define SELECT_EV_WRITE 2
-
-#define SEC_TO_MS 1000
-#define SEC_TO_US (1000 * 1000)
-
-/* Use select to wait for readiness on fileno.
- *
- * - Return SELECT_EV_* if the file is ready
- * - Return 0 on timeout
- * - Return -1 (and set an exception) on error.
- *
- * The wisdom of this function comes from:
- *
- * - PostgreSQL libpq (see src/interfaces/libpq/fe-misc.c)
- * - Python select module (see Modules/selectmodule.c)
- */
-static int
-wait_c_impl(int fileno, int wait, float timeout)
-{
-    int select_rv;
-    int rv = -1;
-
-#if defined(HAVE_POLL) && !defined(HAVE_BROKEN_POLL)
-
-    struct pollfd input_fd;
-    int timeout_ms;
-
-    input_fd.fd = fileno;
-    input_fd.events = POLLERR;
-    input_fd.revents = 0;
-
-    if (wait & SELECT_EV_READ) { input_fd.events |= POLLIN; }
-    if (wait & SELECT_EV_WRITE) { input_fd.events |= POLLOUT; }
-
-    if (timeout < 0.0) {
-        timeout_ms = -1;
-    } else {
-        timeout_ms = (int)(timeout * SEC_TO_MS);
-    }
-
-retry_eintr:
-
-    Py_BEGIN_ALLOW_THREADS
-    errno = 0;
-    select_rv = poll(&input_fd, 1, timeout_ms);
-    Py_END_ALLOW_THREADS
-
-    /* The grace of PEP 475 */
-    if (errno == EINTR) {
-        goto retry_eintr;
-    }
-
-    if (PyErr_CheckSignals()) { goto finally; }
-    if (select_rv < 0) { goto error; }  /* poll error */
-
-    rv = 0;  /* success, maybe with timeout */
-    if (select_rv >= 0) {
-        if (input_fd.revents & POLLIN) { rv |= SELECT_EV_READ; }
-        if (input_fd.revents & POLLOUT) { rv |= SELECT_EV_WRITE; }
-    }
-
-#else
-
-    fd_set ifds;
-    fd_set ofds;
-    fd_set efds;
-    struct timeval tv, *tvptr;
-
-#ifndef MS_WINDOWS
-    if (fileno >= 1024) {
-        PyErr_SetString(
-            PyExc_ValueError,  /* same exception of Python's 'select.select()' */
-            "connection file descriptor out of range for 'select()'");
-        return -1;
-    }
-#endif
-
-    FD_ZERO(&ifds);
-    FD_ZERO(&ofds);
-    FD_ZERO(&efds);
-
-    if (wait & SELECT_EV_READ) { FD_SET(fileno, &ifds); }
-    if (wait & SELECT_EV_WRITE) { FD_SET(fileno, &ofds); }
-    FD_SET(fileno, &efds);
-
-    /* Compute appropriate timeout interval */
-    if (timeout < 0.0) {
-        tvptr = NULL;
-    }
-    else {
-        tv.tv_sec = (int)timeout;
-        tv.tv_usec = (int)(((long)(timeout * SEC_TO_US)) % SEC_TO_US);
-        tvptr = &tv;
-    }
-
-retry_eintr:
-
-    Py_BEGIN_ALLOW_THREADS
-    errno = 0;
-    select_rv = select(fileno + 1, &ifds, &ofds, &efds, tvptr);
-    Py_END_ALLOW_THREADS
-
-    /* The grace of PEP 475 */
-    if (errno == EINTR) {
-        goto retry_eintr;
-    }
-
-    if (PyErr_CheckSignals()) { goto finally; }
-    if (select_rv < 0) { goto error; }  /* select error */
-
-    rv = 0;
-    if (select_rv > 0) {
-        if (FD_ISSET(fileno, &ifds)) { rv |= SELECT_EV_READ; }
-        if (FD_ISSET(fileno, &ofds)) { rv |= SELECT_EV_WRITE; }
-    }
-
-#endif /* HAVE_POLL */
-
-    return rv;
-
-error:
-
-    rv = -1;
-
-#ifdef MS_WINDOWS
-    if (select_rv == SOCKET_ERROR) {
-        PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError());
-    }
-#else
-    if (select_rv < 0) {
-        PyErr_SetFromErrno(PyExc_OSError);
-    }
-#endif
-    else {
-        PyErr_SetString(PyExc_OSError, "unexpected error from select()");
-    }
-
-finally:
-
-    return rv;
-
-}
-    """
-    cdef int wait_c_impl(int fileno, int wait, float timeout) except -1
-
-
-def wait_c(gen: PQGen[RV], int fileno, interval = None) -> RV:
-    """
-    Wait for a generator using poll or select.
-    """
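
For comparison, the readiness check wait_c_impl() performs maps onto the stdlib poll interface roughly as follows (POSIX-only sketch; the EV_* values are assumed to match the SELECT_EV_* macros above):

    import select

    EV_READ, EV_WRITE = 1, 2  # assumed SELECT_EV_READ/SELECT_EV_WRITE values

    def wait_py(fileno, want, timeout=None):
        """Poll-based equivalent of wait_c_impl(); requires select.poll()."""
        poller = select.poll()
        mask = select.POLLERR
        if want & EV_READ:
            mask |= select.POLLIN
        if want & EV_WRITE:
            mask |= select.POLLOUT
        poller.register(fileno, mask)
        # poll() takes milliseconds; a negative value means wait forever.
        ms = -1 if timeout is None or timeout < 0 else int(timeout * 1000)
        rv = 0
        for _, revents in poller.poll(ms):  # PEP 475: EINTR retried by Python
            if revents & select.POLLIN:
                rv |= EV_READ
            if revents & select.POLLOUT:
                rv |= EV_WRITE
        return rv
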
- """ - cdef float cinterval - cdef int wait, ready - cdef PyObject *pyready - - if interval is None: - cinterval = -1.0 - else: - cinterval = float(interval) - if cinterval < 0.0: - cinterval = -1.0 - - send = gen.send - - try: - wait = next(gen) - - while True: - ready = wait_c_impl(fileno, wait, cinterval) - if ready == READY_NONE: - pyready = PY_READY_NONE - elif ready == READY_R: - pyready = PY_READY_R - elif ready == READY_RW: - pyready = PY_READY_RW - elif ready == READY_W: - pyready = PY_READY_W - else: - raise AssertionError(f"unexpected ready value: {ready}") - - wait = PyObject_CallFunctionObjArgs(send, pyready, NULL) - - except StopIteration as ex: - rv: RV = ex.value - return rv diff --git a/psycopg_c/psycopg_c/_uuid.py b/psycopg_c/psycopg_c/_uuid.py deleted file mode 100644 index ec6b5338f..000000000 --- a/psycopg_c/psycopg_c/_uuid.py +++ /dev/null @@ -1,28 +0,0 @@ -""" -Internal objects to support the UUID adapters. -""" - -# Copyright (C) 2025 The Psycopg Team - -import uuid - -# Re-exports -UUID = uuid.UUID -SafeUUID_unknown = uuid.SafeUUID.unknown - - -class _WritableUUID(UUID): - """Temporary class, with the same memory layout of UUID, but writable. - - This class must have the same memory layout of the UUID class, so we can - create one, setting the `int` attribute, and changing the `__class__`, - which should be faster than calling the complex UUID.__init__ machinery. - - u = _WritableUUID() - u.is_safe = ... - u.int = ... - u.__class__ = UUID - """ - - __slots__ = () # Give the class the same memory layout of the base clasee - __setattr__ = object.__setattr__ # make the class writable diff --git a/psycopg_c/psycopg_c/pq.pxd b/psycopg_c/psycopg_c/pq.pxd deleted file mode 100644 index 8a2dbd634..000000000 --- a/psycopg_c/psycopg_c/pq.pxd +++ /dev/null @@ -1,85 +0,0 @@ -# Include pid_t but Windows doesn't have it -# Don't use "IF" so that the generated C is portable and can be included -# in the sdist. 
-cdef extern from * nogil: - """ -#if defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS) - typedef signed pid_t; -#else - #include -#endif - """ - ctypedef signed pid_t - -from psycopg_c.pq cimport libpq - -ctypedef char *(*conn_bytes_f) (const libpq.PGconn *) -ctypedef int(*conn_int_f) (const libpq.PGconn *) - - -cdef class PGconn: - cdef libpq.PGconn* _pgconn_ptr - cdef object __weakref__ - cdef public object notice_handler - cdef public object notify_handler - cdef pid_t _procpid - - @staticmethod - cdef PGconn _from_ptr(libpq.PGconn *ptr) - - cpdef int flush(self) except -1 - cpdef object notifies(self) - - -cdef class PGresult: - cdef libpq.PGresult* _pgresult_ptr - - @staticmethod - cdef PGresult _from_ptr(libpq.PGresult *ptr) - - -cdef class PGcancelConn: - cdef libpq.PGcancelConn* pgcancelconn_ptr - - @staticmethod - cdef PGcancelConn _from_ptr(libpq.PGcancelConn *ptr) - - -cdef class PGcancel: - cdef libpq.PGcancel* pgcancel_ptr - - @staticmethod - cdef PGcancel _from_ptr(libpq.PGcancel *ptr) - - -cdef class Escaping: - cdef PGconn conn - - cpdef escape_literal(self, data) - cpdef escape_identifier(self, data) - cpdef escape_string(self, data) - cpdef escape_bytea(self, data) - cpdef unescape_bytea(self, const unsigned char *data) - - -cdef class PQBuffer: - cdef unsigned char *buf - cdef Py_ssize_t len - - @staticmethod - cdef PQBuffer _from_buffer(unsigned char *buf, Py_ssize_t length) - - -cdef class ViewBuffer: - cdef unsigned char *buf - cdef Py_ssize_t len - cdef object obj - - @staticmethod - cdef ViewBuffer _from_buffer( - object obj, unsigned char *buf, Py_ssize_t length) - - -cdef int _buffer_as_string_and_size( - data: "Buffer", char **ptr, Py_ssize_t *length -) except -1 diff --git a/psycopg_c/psycopg_c/pq.pyx b/psycopg_c/psycopg_c/pq.pyx deleted file mode 100644 index 26f7779f8..000000000 --- a/psycopg_c/psycopg_c/pq.pyx +++ /dev/null @@ -1,37 +0,0 @@ -""" -libpq Python wrapper using cython bindings. -""" - -# Copyright (C) 2020 The Psycopg Team - -from psycopg_c.pq cimport libpq - -import logging - -from psycopg import errors as e -from psycopg.pq import Format - -logger = logging.getLogger("psycopg") - -__impl__ = 'c' -__build_version__ = libpq.PG_VERSION_NUM - - -def version(): - return libpq.PQlibVersion() - - -include "pq/pgconn.pyx" -include "pq/pgresult.pyx" -include "pq/pgcancel.pyx" -include "pq/conninfo.pyx" -include "pq/escaping.pyx" -include "pq/pqbuffer.pyx" - - -# importing the ssl module sets up Python's libcrypto callbacks -import ssl # noqa - -# disable libcrypto setup in libpq, so it won't stomp on the callbacks -# that have already been set up -libpq.PQinitOpenSSL(1, 0) diff --git a/psycopg_c/psycopg_c/pq/__init__.pxd b/psycopg_c/psycopg_c/pq/__init__.pxd deleted file mode 100644 index ce8c60c96..000000000 --- a/psycopg_c/psycopg_c/pq/__init__.pxd +++ /dev/null @@ -1,9 +0,0 @@ -""" -psycopg_c.pq cython module. - -This file is necessary to allow c-importing pxd files from this directory. -""" - -# Copyright (C) 2020 The Psycopg Team - -from psycopg_c.pq cimport libpq diff --git a/psycopg_c/psycopg_c/pq/conninfo.pyx b/psycopg_c/psycopg_c/pq/conninfo.pyx deleted file mode 100644 index 35fb887d9..000000000 --- a/psycopg_c/psycopg_c/pq/conninfo.pyx +++ /dev/null @@ -1,61 +0,0 @@ -""" -psycopg_c.pq.Conninfo object implementation. 
-""" - -# Copyright (C) 2020 The Psycopg Team - -from psycopg.pq.misc import ConninfoOption - - -class Conninfo: - @classmethod - def get_defaults(cls) -> list[ConninfoOption]: - cdef libpq.PQconninfoOption *opts = libpq.PQconndefaults() - if opts is NULL : - raise MemoryError("couldn't allocate connection defaults") - rv = _options_from_array(opts) - libpq.PQconninfoFree(opts) - return rv - - @classmethod - def parse(cls, const char *conninfo) -> list[ConninfoOption]: - cdef char *errmsg = NULL - cdef libpq.PQconninfoOption *opts = libpq.PQconninfoParse(conninfo, &errmsg) - if opts is NULL: - if errmsg is NULL: - raise MemoryError("couldn't allocate on conninfo parse") - else: - exc = e.OperationalError(errmsg.decode("utf8", "replace")) - libpq.PQfreemem(errmsg) - raise exc - - rv = _options_from_array(opts) - libpq.PQconninfoFree(opts) - return rv - - def __repr__(self): - return f"<{type(self).__name__} ({self.keyword.decode('ascii')})>" - - -cdef _options_from_array(libpq.PQconninfoOption *opts): - rv = [] - cdef int i = 0 - cdef libpq.PQconninfoOption* opt - while True: - opt = opts + i - if opt.keyword is NULL: - break - rv.append( - ConninfoOption( - keyword=opt.keyword, - envvar=opt.envvar if opt.envvar is not NULL else None, - compiled=opt.compiled if opt.compiled is not NULL else None, - val=opt.val if opt.val is not NULL else None, - label=opt.label if opt.label is not NULL else None, - dispchar=opt.dispchar if opt.dispchar is not NULL else None, - dispsize=opt.dispsize, - ) - ) - i += 1 - - return rv diff --git a/psycopg_c/psycopg_c/pq/escaping.pyx b/psycopg_c/psycopg_c/pq/escaping.pyx deleted file mode 100644 index 2956b2f59..000000000 --- a/psycopg_c/psycopg_c/pq/escaping.pyx +++ /dev/null @@ -1,132 +0,0 @@ -""" -psycopg_c.pq.Escaping object implementation. 
-""" - -# Copyright (C) 2020 The Psycopg Team - -from cpython.mem cimport PyMem_Free, PyMem_Malloc -from libc.string cimport strlen - - -cdef class Escaping: - def __init__(self, PGconn conn = None): - self.conn = conn - - cpdef escape_literal(self, data): - cdef char *out - cdef char *ptr - cdef Py_ssize_t length - - if self.conn is None: - raise e.OperationalError("escape_literal failed: no connection provided") - if self.conn._pgconn_ptr is NULL: - raise e.OperationalError("the connection is closed") - - _buffer_as_string_and_size(data, &ptr, &length) - - out = libpq.PQescapeLiteral(self.conn._pgconn_ptr, ptr, length) - if out is NULL: - raise e.OperationalError( - f"escape_literal failed: {self.conn.get_error_message())}" - ) - - rv = out[:strlen(out)] - libpq.PQfreemem(out) - return rv - - cpdef escape_identifier(self, data): - cdef char *out - cdef char *ptr - cdef Py_ssize_t length - - _buffer_as_string_and_size(data, &ptr, &length) - - if self.conn is None: - raise e.OperationalError("escape_identifier failed: no connection provided") - if self.conn._pgconn_ptr is NULL: - raise e.OperationalError("the connection is closed") - - out = libpq.PQescapeIdentifier(self.conn._pgconn_ptr, ptr, length) - if out is NULL: - raise e.OperationalError( - f"escape_identifier failed: {self.conn.get_error_message()}" - ) - - rv = out[:strlen(out)] - libpq.PQfreemem(out) - return rv - - cpdef escape_string(self, data): - cdef int error - cdef size_t len_out - cdef char *ptr - cdef char *buf_out - cdef Py_ssize_t length - - _buffer_as_string_and_size(data, &ptr, &length) - - if self.conn is not None: - if self.conn._pgconn_ptr is NULL: - raise e.OperationalError("the connection is closed") - - buf_out = PyMem_Malloc(length * 2 + 1) - len_out = libpq.PQescapeStringConn( - self.conn._pgconn_ptr, buf_out, ptr, length, &error - ) - if error: - PyMem_Free(buf_out) - raise e.OperationalError( - f"escape_string failed: {self.conn.get_error_message()}" - ) - - else: - buf_out = PyMem_Malloc(length * 2 + 1) - len_out = libpq.PQescapeString(buf_out, ptr, length) - - rv = buf_out[:len_out] - PyMem_Free(buf_out) - return rv - - cpdef escape_bytea(self, data): - cdef size_t len_out - cdef unsigned char *out - cdef char *ptr - cdef Py_ssize_t length - - if self.conn is not None and self.conn._pgconn_ptr is NULL: - raise e.OperationalError("the connection is closed") - - _buffer_as_string_and_size(data, &ptr, &length) - - if self.conn is not None: - out = libpq.PQescapeByteaConn( - self.conn._pgconn_ptr, ptr, length, &len_out) - else: - out = libpq.PQescapeBytea(ptr, length, &len_out) - - if out is NULL: - raise MemoryError( - f"couldn't allocate for escape_bytea of {len(data)} bytes" - ) - - rv = out[:len_out - 1] # out includes final 0 - libpq.PQfreemem(out) - return rv - - cpdef unescape_bytea(self, const unsigned char *data): - # not needed, but let's keep it symmetric with the escaping: - # if a connection is passed in, it must be valid. 
- if self.conn is not None: - if self.conn._pgconn_ptr is NULL: - raise e.OperationalError("the connection is closed") - - cdef size_t len_out - cdef unsigned char *out = libpq.PQunescapeBytea(data, &len_out) - if out is NULL: - raise MemoryError( - f"couldn't allocate for unescape_bytea of {len(data)} bytes" - ) - - rv = out[:len_out] - libpq.PQfreemem(out) - return rv diff --git a/psycopg_c/psycopg_c/pq/libpq.pxd b/psycopg_c/psycopg_c/pq/libpq.pxd deleted file mode 100644 index ada48d674..000000000 --- a/psycopg_c/psycopg_c/pq/libpq.pxd +++ /dev/null @@ -1,364 +0,0 @@ -""" -Libpq header definition for the cython psycopg.pq implementation. -""" - -# Copyright (C) 2020 The Psycopg Team - -cdef extern from "stdio.h": - - ctypedef struct FILE: - pass - -cdef extern from "pg_config.h": - - int PG_VERSION_NUM - - -cdef extern from "libpq-fe.h": - - # structures and types - - ctypedef unsigned int Oid - - ctypedef struct PGconn: - pass - - ctypedef struct PGresult: - pass - - ctypedef struct PQconninfoOption: - char *keyword - char *envvar - char *compiled - char *val - char *label - char *dispchar - int dispsize - - ctypedef struct PGnotify: - char *relname - int be_pid - char *extra - - ctypedef struct PGcancelConn: - pass - - ctypedef struct PGcancel: - pass - - ctypedef struct PGresAttDesc: - char *name - Oid tableid - int columnid - int format - Oid typid - int typlen - int atttypmod - - # enums - - # Check in src/interfaces/libpq/libpq-fe.h for updates. - - ctypedef enum PostgresPollingStatusType: - PGRES_POLLING_FAILED = 0 - PGRES_POLLING_READING - PGRES_POLLING_WRITING - PGRES_POLLING_OK - PGRES_POLLING_ACTIVE - - - ctypedef enum PGPing: - PQPING_OK - PQPING_REJECT - PQPING_NO_RESPONSE - PQPING_NO_ATTEMPT - - ctypedef enum ConnStatusType: - CONNECTION_OK - CONNECTION_BAD - CONNECTION_STARTED - CONNECTION_MADE - CONNECTION_AWAITING_RESPONSE - CONNECTION_AUTH_OK - CONNECTION_SETENV - CONNECTION_SSL_STARTUP - CONNECTION_NEEDED - CONNECTION_CHECK_WRITABLE - CONNECTION_CONSUME - CONNECTION_GSS_STARTUP - CONNECTION_CHECK_TARGET - CONNECTION_CHECK_STANDBY - CONNECTION_ALLOCATED - - ctypedef enum PGTransactionStatusType: - PQTRANS_IDLE - PQTRANS_ACTIVE - PQTRANS_INTRANS - PQTRANS_INERROR - PQTRANS_UNKNOWN - - ctypedef enum ExecStatusType: - PGRES_EMPTY_QUERY = 0 - PGRES_COMMAND_OK - PGRES_TUPLES_OK - PGRES_COPY_OUT - PGRES_COPY_IN - PGRES_BAD_RESPONSE - PGRES_NONFATAL_ERROR - PGRES_FATAL_ERROR - PGRES_COPY_BOTH - PGRES_SINGLE_TUPLE - PGRES_PIPELINE_SYNC - PGRES_PIPELINE_ABORTED - PGRES_TUPLES_CHUNK - - # 33.1. Database Connection Control Functions - PGconn *PQconnectdb(const char *conninfo) - PGconn *PQconnectStart(const char *conninfo) - PostgresPollingStatusType PQconnectPoll(PGconn *conn) nogil - PQconninfoOption *PQconndefaults() - PQconninfoOption *PQconninfo(PGconn *conn) - PQconninfoOption *PQconninfoParse(const char *conninfo, char **errmsg) - void PQfinish(PGconn *conn) - void PQreset(PGconn *conn) - int PQresetStart(PGconn *conn) - PostgresPollingStatusType PQresetPoll(PGconn *conn) - PGPing PQping(const char *conninfo) - - # 33.2. 
Connection Status Functions - char *PQdb(const PGconn *conn) - char *PQuser(const PGconn *conn) - char *PQpass(const PGconn *conn) - char *PQhost(const PGconn *conn) - char *PQhostaddr(const PGconn *conn) - char *PQport(const PGconn *conn) - char *PQtty(const PGconn *conn) - char *PQoptions(const PGconn *conn) - ConnStatusType PQstatus(const PGconn *conn) - PGTransactionStatusType PQtransactionStatus(const PGconn *conn) - const char *PQparameterStatus(const PGconn *conn, const char *paramName) - int PQprotocolVersion(const PGconn *conn) - int PQserverVersion(const PGconn *conn) - char *PQerrorMessage(const PGconn *conn) - int PQsocket(const PGconn *conn) nogil - int PQbackendPID(const PGconn *conn) - int PQconnectionNeedsPassword(const PGconn *conn) - int PQconnectionUsedPassword(const PGconn *conn) - int PQsslInUse(PGconn *conn) # TODO: const in PG 12 docs - verify/report - # TODO: PQsslAttribute, PQsslAttributeNames, PQsslStruct, PQgetssl - - # 33.3. Command Execution Functions - PGresult *PQexec(PGconn *conn, const char *command) nogil - PGresult *PQexecParams(PGconn *conn, - const char *command, - int nParams, - const Oid *paramTypes, - const char * const *paramValues, - const int *paramLengths, - const int *paramFormats, - int resultFormat) nogil - PGresult *PQprepare(PGconn *conn, - const char *stmtName, - const char *query, - int nParams, - const Oid *paramTypes) nogil - PGresult *PQexecPrepared(PGconn *conn, - const char *stmtName, - int nParams, - const char * const *paramValues, - const int *paramLengths, - const int *paramFormats, - int resultFormat) nogil - PGresult *PQdescribePrepared(PGconn *conn, const char *stmtName) nogil - PGresult *PQdescribePortal(PGconn *conn, const char *portalName) nogil - PGresult *PQclosePrepared(PGconn *conn, const char *stmtName) nogil - PGresult *PQclosePortal(PGconn *conn, const char *portalName) nogil - ExecStatusType PQresultStatus(const PGresult *res) nogil - # PQresStatus: not needed, we have pretty enums - char *PQresultErrorMessage(const PGresult *res) nogil - # TODO: PQresultVerboseErrorMessage - char *PQresultErrorField(const PGresult *res, int fieldcode) nogil - void PQclear(PGresult *res) nogil - - # 33.3.2. Retrieving Query Result Information - int PQntuples(const PGresult *res) - int PQnfields(const PGresult *res) - char *PQfname(const PGresult *res, int column_number) - int PQfnumber(const PGresult *res, const char *column_name) - Oid PQftable(const PGresult *res, int column_number) - int PQftablecol(const PGresult *res, int column_number) - int PQfformat(const PGresult *res, int column_number) - Oid PQftype(const PGresult *res, int column_number) - int PQfmod(const PGresult *res, int column_number) - int PQfsize(const PGresult *res, int column_number) - int PQbinaryTuples(const PGresult *res) - char *PQgetvalue(const PGresult *res, int row_number, int column_number) - int PQgetisnull(const PGresult *res, int row_number, int column_number) - int PQgetlength(const PGresult *res, int row_number, int column_number) - int PQnparams(const PGresult *res) - Oid PQparamtype(const PGresult *res, int param_number) - # PQprint: pretty useless - - # 33.3.3. Retrieving Other Result Information - char *PQcmdStatus(PGresult *res) - char *PQcmdTuples(PGresult *res) - Oid PQoidValue(const PGresult *res) - - # 33.3.4. 
Escaping Strings for Inclusion in SQL Commands - char *PQescapeIdentifier(PGconn *conn, const char *str, size_t length) - char *PQescapeLiteral(PGconn *conn, const char *str, size_t length) - size_t PQescapeStringConn(PGconn *conn, - char *to, const char *from_, size_t length, - int *error) - size_t PQescapeString(char *to, const char *from_, size_t length) - unsigned char *PQescapeByteaConn(PGconn *conn, - const unsigned char *src, - size_t from_length, - size_t *to_length) - unsigned char *PQescapeBytea(const unsigned char *src, - size_t from_length, - size_t *to_length) - unsigned char *PQunescapeBytea(const unsigned char *src, size_t *to_length) - - - # 33.4. Asynchronous Command Processing - int PQsendQuery(PGconn *conn, const char *command) nogil - int PQsendQueryParams(PGconn *conn, - const char *command, - int nParams, - const Oid *paramTypes, - const char * const *paramValues, - const int *paramLengths, - const int *paramFormats, - int resultFormat) nogil - int PQsendPrepare(PGconn *conn, - const char *stmtName, - const char *query, - int nParams, - const Oid *paramTypes) nogil - int PQsendQueryPrepared(PGconn *conn, - const char *stmtName, - int nParams, - const char * const *paramValues, - const int *paramLengths, - const int *paramFormats, - int resultFormat) nogil - int PQsendDescribePrepared(PGconn *conn, const char *stmtName) nogil - int PQsendDescribePortal(PGconn *conn, const char *portalName) nogil - int PQsendClosePrepared(PGconn *conn, const char *stmtName) nogil - int PQsendClosePortal(PGconn *conn, const char *portalName) nogil - PGresult *PQgetResult(PGconn *conn) nogil - int PQconsumeInput(PGconn *conn) nogil - int PQisBusy(PGconn *conn) nogil - int PQsetnonblocking(PGconn *conn, int arg) nogil - int PQisnonblocking(const PGconn *conn) - int PQflush(PGconn *conn) nogil - - # 32.6. Retrieving Query Results in Chunks - int PQsetSingleRowMode(PGconn *conn) - int PQsetChunkedRowsMode(PGconn *conn, int chunkSize) - - # 34.7. Canceling Queries in Progress - PGcancelConn *PQcancelCreate(PGconn *conn) - int PQcancelStart(PGcancelConn *cancelConn) - int PQcancelBlocking(PGcancelConn *cancelConn) - PostgresPollingStatusType PQcancelPoll(PGcancelConn *cancelConn) nogil - ConnStatusType PQcancelStatus(const PGcancelConn *cancelConn) - int PQcancelSocket(PGcancelConn *cancelConn) - char *PQcancelErrorMessage(const PGcancelConn *cancelConn) - void PQcancelReset(PGcancelConn *cancelConn) - void PQcancelFinish(PGcancelConn *cancelConn) - PGcancel *PQgetCancel(PGconn *conn) - void PQfreeCancel(PGcancel *cancel) - int PQcancel(PGcancel *cancel, char *errbuf, int errbufsize) - - # 33.8. Asynchronous Notification - PGnotify *PQnotifies(PGconn *conn) nogil - - # 33.9. Functions Associated with the COPY Command - int PQputCopyData(PGconn *conn, const char *buffer, int nbytes) nogil - int PQputCopyEnd(PGconn *conn, const char *errormsg) nogil - int PQgetCopyData(PGconn *conn, char **buffer, int async) nogil - - # 33.10. Control Functions - void PQtrace(PGconn *conn, FILE *stream); - void PQsetTraceFlags(PGconn *conn, int flags); - void PQuntrace(PGconn *conn); - - # 33.11. 
Miscellaneous Functions - void PQfreemem(void *ptr) nogil - void PQconninfoFree(PQconninfoOption *connOptions) - char *PQencryptPasswordConn( - PGconn *conn, const char *passwd, const char *user, const char *algorithm); - PGresult *PQchangePassword(PGconn *conn, const char *user, const char *passwd); - PGresult *PQmakeEmptyPGresult(PGconn *conn, ExecStatusType status) - int PQsetResultAttrs(PGresult *res, int numAttributes, PGresAttDesc *attDescs) - int PQlibVersion() - - # 33.12. Notice Processing - ctypedef void (*PQnoticeReceiver)(void *arg, const PGresult *res) - PQnoticeReceiver PQsetNoticeReceiver( - PGconn *conn, PQnoticeReceiver prog, void *arg) - - # 33.18. SSL Support - void PQinitOpenSSL(int do_ssl, int do_crypto) - - # 34.5 Pipeline Mode - - ctypedef enum PGpipelineStatus: - PQ_PIPELINE_OFF - PQ_PIPELINE_ON - PQ_PIPELINE_ABORTED - - PGpipelineStatus PQpipelineStatus(const PGconn *conn) - int PQenterPipelineMode(PGconn *conn) - int PQexitPipelineMode(PGconn *conn) - int PQpipelineSync(PGconn *conn) - int PQsendFlushRequest(PGconn *conn) - -cdef extern from *: - """ -/* Hack to allow the use of old libpq versions */ -#if PG_VERSION_NUM < 100000 -#define PQencryptPasswordConn(conn, passwd, user, algorithm) NULL -#endif - -#if PG_VERSION_NUM < 120000 -#define PQhostaddr(conn) NULL -#endif - -#if PG_VERSION_NUM < 140000 -#define PGRES_PIPELINE_SYNC 10 -#define PGRES_PIPELINE_ABORTED 11 -typedef enum { - PQ_PIPELINE_OFF, - PQ_PIPELINE_ON, - PQ_PIPELINE_ABORTED -} PGpipelineStatus; -#define PQpipelineStatus(conn) PQ_PIPELINE_OFF -#define PQenterPipelineMode(conn) 0 -#define PQexitPipelineMode(conn) 1 -#define PQpipelineSync(conn) 0 -#define PQsendFlushRequest(conn) 0 -#define PQsetTraceFlags(conn, stream) do {} while (0) -#endif - -#if PG_VERSION_NUM < 170000 -typedef struct pg_cancel_conn PGcancelConn; -#define PQchangePassword(conn, user, passwd) NULL -#define PQclosePrepared(conn, name) NULL -#define PQclosePortal(conn, name) NULL -#define PQsendClosePrepared(conn, name) 0 -#define PQsendClosePortal(conn, name) 0 -#define PQcancelCreate(conn) NULL -#define PQcancelStart(cancelConn) 0 -#define PQcancelBlocking(cancelConn) 0 -#define PQcancelPoll(cancelConn) CONNECTION_OK -#define PQcancelStatus(cancelConn) 0 -#define PQcancelSocket(cancelConn) -1 -#define PQcancelErrorMessage(cancelConn) NULL -#define PQcancelReset(cancelConn) 0 -#define PQcancelFinish(cancelConn) 0 -#define PQsetChunkedRowsMode(conn, chunkSize) 0 -#endif -""" diff --git a/psycopg_c/psycopg_c/pq/pgcancel.pyx b/psycopg_c/psycopg_c/pq/pgcancel.pyx deleted file mode 100644 index 8918d1537..000000000 --- a/psycopg_c/psycopg_c/pq/pgcancel.pyx +++ /dev/null @@ -1,103 +0,0 @@ -""" -psycopg_c.pq.PGcancel object implementation. -""" - -# Copyright (C) 2020 The Psycopg Team - - -cdef class PGcancelConn: - def __cinit__(self): - self.pgcancelconn_ptr = NULL - - @staticmethod - cdef PGcancelConn _from_ptr(libpq.PGcancelConn *ptr): - cdef PGcancelConn rv = PGcancelConn.__new__(PGcancelConn) - rv.pgcancelconn_ptr = ptr - return rv - - def __dealloc__(self) -> None: - self.finish() - - def start(self) -> None: - """Requests that the server abandons processing of the current command - in a non-blocking manner. - - See :pq:`PQcancelStart` for details. - """ - if not libpq.PQcancelStart(self.pgcancelconn_ptr): - raise e.OperationalError( - f"couldn't send cancellation: {self.get_error_message()}" - ) - - def blocking(self) -> None: - """Requests that the server abandons processing of the current command - in a blocking manner. 
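
The PG_VERSION_NUM guards above stub out newer libpq entry points at compile time; the Python layer then guards the same features at runtime, roughly as in this sketch (modelled on the _check_supported() helper that appears later in this patch; the message format is simplified):

    def check_supported(fname, needed, current):
        """E.g. check_supported("PQcancelCreate", 170000, pq.version())."""
        if current < needed:
            raise NotImplementedError(
                f"{fname} requires libpq from PostgreSQL {needed // 10000};"
                f" version {current // 10000} available instead"
            )

    check_supported("PQpipelineSync", 140000, 170000)  # passes
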
- - See :pq:`PQcancelBlocking` for details. - """ - if not libpq.PQcancelBlocking(self.pgcancelconn_ptr): - raise e.OperationalError( - f"couldn't send cancellation: {self.get_error_message()}" - ) - - def poll(self) -> int: - self._ensure_pgcancelconn() - return libpq.PQcancelPoll(self.pgcancelconn_ptr) - - @property - def status(self) -> int: - return libpq.PQcancelStatus(self.pgcancelconn_ptr) - - @property - def socket(self) -> int: - rv = libpq.PQcancelSocket(self.pgcancelconn_ptr) - if rv == -1: - raise e.OperationalError("cancel connection not opened") - return rv - - @property - def error_message(self) -> bytes: - return libpq.PQcancelErrorMessage(self.pgcancelconn_ptr) - - def get_error_message(self, encoding: str = "utf-8") -> str: - return _clean_error_message(self.error_message, encoding) - - def reset(self) -> None: - self._ensure_pgcancelconn() - libpq.PQcancelReset(self.pgcancelconn_ptr) - - def finish(self) -> None: - if self.pgcancelconn_ptr is not NULL: - libpq.PQcancelFinish(self.pgcancelconn_ptr) - self.pgcancelconn_ptr = NULL - - def _ensure_pgcancelconn(self) -> None: - if self.pgcancelconn_ptr is NULL: - raise e.OperationalError("the cancel connection is closed") - - -cdef class PGcancel: - def __cinit__(self): - self.pgcancel_ptr = NULL - - @staticmethod - cdef PGcancel _from_ptr(libpq.PGcancel *ptr): - cdef PGcancel rv = PGcancel.__new__(PGcancel) - rv.pgcancel_ptr = ptr - return rv - - def __dealloc__(self) -> None: - self.free() - - def free(self) -> None: - if self.pgcancel_ptr is not NULL: - libpq.PQfreeCancel(self.pgcancel_ptr) - self.pgcancel_ptr = NULL - - def cancel(self) -> None: - cdef char buf[256] - cdef int res = libpq.PQcancel(self.pgcancel_ptr, buf, sizeof(buf)) - if not res: - raise e.OperationalError( - f"cancel failed: {buf.decode('utf8', 'ignore')}" - ) diff --git a/psycopg_c/psycopg_c/pq/pgconn.pyx b/psycopg_c/psycopg_c/pq/pgconn.pyx deleted file mode 100644 index 7c4eccb83..000000000 --- a/psycopg_c/psycopg_c/pq/pgconn.pyx +++ /dev/null @@ -1,798 +0,0 @@ -""" -psycopg_c.pq.PGconn object implementation. -""" - -# Copyright (C) 2020 The Psycopg Team - -cdef extern from * nogil: - """ -#if defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS) - /* We don't need a real definition for this because Windows is not affected - * by the issue caused by closing the fds after fork. 
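
Putting the PGcancelConn wrapper above together, a non-blocking cancellation loop looks roughly like this (sketch only; the wait_readable/wait_writable helpers are assumed, and the polling-state values come from the PostgresPollingStatusType enum declared earlier in this patch):

    PGRES_POLLING_READING, PGRES_POLLING_WRITING, PGRES_POLLING_OK = 1, 2, 3

    def cancel_nonblocking(pgconn, wait_readable, wait_writable):
        cancel = pgconn.cancel_conn()  # wraps PQcancelCreate
        cancel.start()                 # wraps PQcancelStart
        while True:
            state = cancel.poll()      # wraps PQcancelPoll
            if state == PGRES_POLLING_OK:
                break
            elif state == PGRES_POLLING_READING:
                wait_readable(cancel.socket)
            elif state == PGRES_POLLING_WRITING:
                wait_writable(cancel.socket)
            else:
                raise RuntimeError(cancel.get_error_message())
        cancel.finish()
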
- */ - #define getpid() (0) -#else - #include -#endif - """ - pid_t getpid() - -from libc.stdio cimport fdopen -from cpython.mem cimport PyMem_Free, PyMem_Malloc -from cpython.bytes cimport PyBytes_AsString -from cpython.memoryview cimport PyMemoryView_FromObject - -import sys - -from psycopg.pq import Format as PqFormat -from psycopg.pq import Trace, version_pretty -from psycopg.pq.misc import PGnotify, _clean_error_message, connection_summary -from psycopg.pq._enums import ExecStatus -from psycopg._encodings import pg2pyenc - -from psycopg_c.pq cimport PQBuffer - - -cdef object _check_supported(fname, int pgversion): - if libpq.PG_VERSION_NUM < pgversion: - raise e.NotSupportedError( - f"{fname} requires libpq from PostgreSQL {version_pretty(pgversion)}" - f" on the client; version {version_pretty(libpq.PG_VERSION_NUM)}" - " available instead" - ) - -cdef class PGconn: - @staticmethod - cdef PGconn _from_ptr(libpq.PGconn *ptr): - cdef PGconn rv = PGconn.__new__(PGconn) - rv._pgconn_ptr = ptr - - libpq.PQsetNoticeReceiver( - ptr, notice_receiver, rv) - return rv - - def __cinit__(self): - self._pgconn_ptr = NULL - self._procpid = getpid() - - def __dealloc__(self): - # Close the connection only if it was created in this process, - # not if this object is being GC'd after fork. - if self._procpid == getpid(): - self.finish() - - def __repr__(self) -> str: - cls = f"{self.__class__.__module__}.{self.__class__.__qualname__}" - info = connection_summary(self) - return f"<{cls} {info} at 0x{id(self):x}>" - - @classmethod - def connect(cls, const char *conninfo) -> PGconn: - cdef libpq.PGconn* pgconn = libpq.PQconnectdb(conninfo) - if not pgconn: - raise MemoryError("couldn't allocate PGconn") - - return PGconn._from_ptr(pgconn) - - @classmethod - def connect_start(cls, const char *conninfo) -> PGconn: - cdef libpq.PGconn* pgconn = libpq.PQconnectStart(conninfo) - if not pgconn: - raise MemoryError("couldn't allocate PGconn") - - return PGconn._from_ptr(pgconn) - - def connect_poll(self) -> int: - return _call_int(self, libpq.PQconnectPoll) - - def finish(self) -> None: - if self._pgconn_ptr is not NULL: - libpq.PQfinish(self._pgconn_ptr) - self._pgconn_ptr = NULL - - @property - def pgconn_ptr(self) -> int | None: - if self._pgconn_ptr: - return self._pgconn_ptr - else: - return None - - @property - def info(self) -> list[ConninfoOption]: - _ensure_pgconn(self) - cdef libpq.PQconninfoOption *opts = libpq.PQconninfo(self._pgconn_ptr) - if opts is NULL: - raise MemoryError("couldn't allocate connection info") - rv = _options_from_array(opts) - libpq.PQconninfoFree(opts) - return rv - - def reset(self) -> None: - _ensure_pgconn(self) - libpq.PQreset(self._pgconn_ptr) - - def reset_start(self) -> None: - if not libpq.PQresetStart(self._pgconn_ptr): - raise e.OperationalError("couldn't reset connection") - - def reset_poll(self) -> int: - return _call_int(self, libpq.PQresetPoll) - - @classmethod - def ping(self, const char *conninfo) -> int: - return libpq.PQping(conninfo) - - @property - def db(self) -> bytes: - return _call_bytes(self, libpq.PQdb) - - @property - def user(self) -> bytes: - return _call_bytes(self, libpq.PQuser) - - @property - def password(self) -> bytes: - return _call_bytes(self, libpq.PQpass) - - @property - def host(self) -> bytes: - return _call_bytes(self, libpq.PQhost) - - @property - def hostaddr(self) -> bytes: - _check_supported("PQhostaddr", 120000) - _ensure_pgconn(self) - cdef char *rv = libpq.PQhostaddr(self._pgconn_ptr) - assert rv is not NULL - return rv - - 
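
The connect_start()/connect_poll() pair above supports the same polling dance for establishing a connection without blocking; a hedged sketch (same assumed helpers and polling constants as in the cancellation example):

    PGRES_POLLING_READING, PGRES_POLLING_WRITING, PGRES_POLLING_OK = 1, 2, 3

    def connect_nonblocking(PGconn, conninfo, wait_readable, wait_writable):
        conn = PGconn.connect_start(conninfo)  # wraps PQconnectStart
        while True:
            state = conn.connect_poll()        # wraps PQconnectPoll
            if state == PGRES_POLLING_OK:
                return conn
            elif state == PGRES_POLLING_READING:
                wait_readable(conn.socket)
            elif state == PGRES_POLLING_WRITING:
                wait_writable(conn.socket)
            else:
                raise OSError(conn.get_error_message())
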
@property - def port(self) -> bytes: - return _call_bytes(self, libpq.PQport) - - @property - def tty(self) -> bytes: - return _call_bytes(self, libpq.PQtty) - - @property - def options(self) -> bytes: - return _call_bytes(self, libpq.PQoptions) - - @property - def status(self) -> int: - return libpq.PQstatus(self._pgconn_ptr) - - @property - def transaction_status(self) -> int: - return libpq.PQtransactionStatus(self._pgconn_ptr) - - def parameter_status(self, const char *name) -> bytes | None: - _ensure_pgconn(self) - cdef const char *rv = libpq.PQparameterStatus(self._pgconn_ptr, name) - if rv is not NULL: - return rv - else: - return None - - @property - def error_message(self) -> bytes: - return libpq.PQerrorMessage(self._pgconn_ptr) - - def get_error_message(self, encoding: str = "") -> str: - return _clean_error_message(self.error_message, encoding or self._encoding) - - @property - def _encoding(self) -> str: - cdef const char *pgenc - if libpq.PQstatus(self._pgconn_ptr) == libpq.CONNECTION_OK: - pgenc = libpq.PQparameterStatus(self._pgconn_ptr, b"client_encoding") - if pgenc is NULL: - pgenc = b"UTF8" - return pg2pyenc(pgenc) - else: - return "utf-8" - - @property - def protocol_version(self) -> int: - return _call_int(self, libpq.PQprotocolVersion) - - @property - def server_version(self) -> int: - return _call_int(self, libpq.PQserverVersion) - - @property - def socket(self) -> int: - rv = _call_int(self, libpq.PQsocket) - if rv == -1: - raise e.OperationalError("the connection is lost") - return rv - - @property - def backend_pid(self) -> int: - return _call_int(self, libpq.PQbackendPID) - - @property - def needs_password(self) -> bool: - return bool(libpq.PQconnectionNeedsPassword(self._pgconn_ptr)) - - @property - def used_password(self) -> bool: - return bool(libpq.PQconnectionUsedPassword(self._pgconn_ptr)) - - @property - def ssl_in_use(self) -> bool: - return bool(_call_int(self, libpq.PQsslInUse)) - - def exec_(self, const char *command) -> PGresult: - _ensure_pgconn(self) - cdef libpq.PGresult *pgresult - with nogil: - pgresult = libpq.PQexec(self._pgconn_ptr, command) - if pgresult is NULL: - raise e.OperationalError(f"executing query failed: {self.get_error_message()}") - - return PGresult._from_ptr(pgresult) - - def send_query(self, const char *command) -> None: - _ensure_pgconn(self) - cdef int rv - with nogil: - rv = libpq.PQsendQuery(self._pgconn_ptr, command) - if not rv: - raise e.OperationalError(f"sending query failed: {self.get_error_message()}") - - def exec_params( - self, - const char *command, - param_values: Sequence[bytes | None] | None, - param_types: Sequence[int] | None = None, - param_formats: Sequence[int] | None = None, - int result_format = PqFormat.TEXT, - ) -> PGresult: - _ensure_pgconn(self) - - cdef Py_ssize_t cnparams - cdef libpq.Oid *ctypes - cdef char *const *cvalues - cdef int *clengths - cdef int *cformats - cnparams, ctypes, cvalues, clengths, cformats = _query_params_args( - param_values, param_types, param_formats) - - cdef libpq.PGresult *pgresult - with nogil: - pgresult = libpq.PQexecParams( - self._pgconn_ptr, command, cnparams, ctypes, - cvalues, clengths, cformats, result_format) - _clear_query_params(ctypes, cvalues, clengths, cformats) - if pgresult is NULL: - raise e.OperationalError(f"executing query failed: {self.get_error_message()}") - return PGresult._from_ptr(pgresult) - - def send_query_params( - self, - const char *command, - param_values: Sequence[bytes | None] | None, - param_types: Sequence[int] | None = None, - 
-        param_formats: Sequence[int] | None = None,
-        int result_format = PqFormat.TEXT,
-    ) -> None:
-        _ensure_pgconn(self)
-
-        cdef Py_ssize_t cnparams
-        cdef libpq.Oid *ctypes
-        cdef char *const *cvalues
-        cdef int *clengths
-        cdef int *cformats
-        cnparams, ctypes, cvalues, clengths, cformats = _query_params_args(
-            param_values, param_types, param_formats)
-
-        cdef int rv
-        with nogil:
-            rv = libpq.PQsendQueryParams(
-                self._pgconn_ptr, command, cnparams, ctypes,
-                cvalues, clengths, cformats, result_format)
-        _clear_query_params(ctypes, cvalues, clengths, cformats)
-        if not rv:
-            raise e.OperationalError(
-                f"sending query and params failed: {self.get_error_message()}"
-            )
-
-    def send_prepare(
-        self,
-        const char *name,
-        const char *command,
-        param_types: Sequence[int] | None = None,
-    ) -> None:
-        _ensure_pgconn(self)
-
-        cdef int i
-        cdef Py_ssize_t nparams = len(param_types) if param_types else 0
-        cdef libpq.Oid *atypes = NULL
-        if nparams:
-            atypes = PyMem_Malloc(nparams * sizeof(libpq.Oid))
-            for i in range(nparams):
-                atypes[i] = param_types[i]
-
-        cdef int rv
-        with nogil:
-            rv = libpq.PQsendPrepare(
-                self._pgconn_ptr, name, command, nparams, atypes
-            )
-        PyMem_Free(atypes)
-        if not rv:
-            raise e.OperationalError(
-                f"sending prepare failed: {self.get_error_message()}"
-            )
-
-    def send_query_prepared(
-        self,
-        const char *name,
-        param_values: Sequence[bytes | None] | None,
-        param_formats: Sequence[int] | None = None,
-        int result_format = PqFormat.TEXT,
-    ) -> None:
-        _ensure_pgconn(self)
-
-        cdef Py_ssize_t cnparams
-        cdef libpq.Oid *ctypes
-        cdef char *const *cvalues
-        cdef int *clengths
-        cdef int *cformats
-        cnparams, ctypes, cvalues, clengths, cformats = _query_params_args(
-            param_values, None, param_formats)
-
-        cdef int rv
-        with nogil:
-            rv = libpq.PQsendQueryPrepared(
-                self._pgconn_ptr, name, cnparams, cvalues,
-                clengths, cformats, result_format)
-        _clear_query_params(ctypes, cvalues, clengths, cformats)
-        if not rv:
-            raise e.OperationalError(
-                f"sending prepared query failed: {self.get_error_message()}"
-            )
-
-    def prepare(
-        self,
-        const char *name,
-        const char *command,
-        param_types: Sequence[int] | None = None,
-    ) -> PGresult:
-        _ensure_pgconn(self)
-
-        cdef int i
-        cdef Py_ssize_t nparams = len(param_types) if param_types else 0
-        cdef libpq.Oid *atypes = NULL
-        if nparams:
-            atypes = PyMem_Malloc(nparams * sizeof(libpq.Oid))
-            for i in range(nparams):
-                atypes[i] = param_types[i]
-
-        cdef libpq.PGresult *rv
-        with nogil:
-            rv = libpq.PQprepare(
-                self._pgconn_ptr, name, command, nparams, atypes)
-        PyMem_Free(atypes)
-        if rv is NULL:
-            raise e.OperationalError(f"preparing query failed: {self.get_error_message()}")
-        return PGresult._from_ptr(rv)
-
-    def exec_prepared(
-        self,
-        const char *name,
-        param_values: Sequence[bytes] | None,
-        param_formats: Sequence[int] | None = None,
-        int result_format = PqFormat.TEXT,
-    ) -> PGresult:
-        _ensure_pgconn(self)
-
-        cdef Py_ssize_t cnparams
-        cdef libpq.Oid *ctypes
-        cdef char *const *cvalues
-        cdef int *clengths
-        cdef int *cformats
-        cnparams, ctypes, cvalues, clengths, cformats = _query_params_args(
-            param_values, None, param_formats)
-
-        cdef libpq.PGresult *rv
-        with nogil:
-            rv = libpq.PQexecPrepared(
-                self._pgconn_ptr, name, cnparams, cvalues,
-                clengths, cformats, result_format)
-
-        _clear_query_params(ctypes, cvalues, clengths, cformats)
-        if rv is NULL:
-            raise e.OperationalError(
-                f"executing prepared query failed: {self.get_error_message()}"
-            )
-        return PGresult._from_ptr(rv)
-
-    def describe_prepared(self, const char *name) -> PGresult:
-        _ensure_pgconn(self)
-        cdef libpq.PGresult *rv = libpq.PQdescribePrepared(self._pgconn_ptr, name)
-        if rv is NULL:
-            raise e.OperationalError(
-                f"describe prepared failed: {self.get_error_message()}"
-            )
-        return PGresult._from_ptr(rv)
-
-    def send_describe_prepared(self, const char *name) -> None:
-        _ensure_pgconn(self)
-        cdef int rv = libpq.PQsendDescribePrepared(self._pgconn_ptr, name)
-        if not rv:
-            raise e.OperationalError(
-                f"sending describe prepared failed: {self.get_error_message()}"
-            )
-
-    def describe_portal(self, const char *name) -> PGresult:
-        _ensure_pgconn(self)
-        cdef libpq.PGresult *rv = libpq.PQdescribePortal(self._pgconn_ptr, name)
-        if rv is NULL:
-            raise e.OperationalError(
-                f"describe portal failed: {self.get_error_message()}"
-            )
-        return PGresult._from_ptr(rv)
-
-    def send_describe_portal(self, const char *name) -> None:
-        _ensure_pgconn(self)
-        cdef int rv = libpq.PQsendDescribePortal(self._pgconn_ptr, name)
-        if not rv:
-            raise e.OperationalError(
-                f"sending describe portal failed: {self.get_error_message()}"
-            )
-
-    def close_prepared(self, const char *name) -> PGresult:
-        _check_supported("PQclosePrepared", 170000)
-        _ensure_pgconn(self)
-        cdef libpq.PGresult *rv = libpq.PQclosePrepared(self._pgconn_ptr, name)
-        if rv is NULL:
-            raise e.OperationalError(
-                f"close prepared failed: {self.get_error_message()}"
-            )
-        return PGresult._from_ptr(rv)
-
-    def send_close_prepared(self, const char *name) -> None:
-        _check_supported("PQsendClosePrepared", 170000)
-        _ensure_pgconn(self)
-        cdef int rv = libpq.PQsendClosePrepared(self._pgconn_ptr, name)
-        if not rv:
-            raise e.OperationalError(
-                f"sending close prepared failed: {self.get_error_message()}"
-            )
-
-    def close_portal(self, const char *name) -> PGresult:
-        _check_supported("PQclosePortal", 170000)
-        _ensure_pgconn(self)
-        cdef libpq.PGresult *rv = libpq.PQclosePortal(self._pgconn_ptr, name)
-        if rv is NULL:
-            raise e.OperationalError(
-                f"close portal failed: {self.get_error_message()}"
-            )
-        return PGresult._from_ptr(rv)
-
-    def send_close_portal(self, const char *name) -> None:
-        _check_supported("PQsendClosePortal", 170000)
-        _ensure_pgconn(self)
-        cdef int rv = libpq.PQsendClosePortal(self._pgconn_ptr, name)
-        if not rv:
-            raise e.OperationalError(
-                f"sending close portal failed: {self.get_error_message()}"
-            )
-
-    def get_result(self) -> "PGresult" | None:
-        cdef libpq.PGresult *pgresult = libpq.PQgetResult(self._pgconn_ptr)
-        if pgresult is NULL:
-            return None
-        return PGresult._from_ptr(pgresult)
-
-    def consume_input(self) -> None:
-        if 1 != libpq.PQconsumeInput(self._pgconn_ptr):
-            raise e.OperationalError(f"consuming input failed: {self.get_error_message()}")
-
-    def is_busy(self) -> int:
-        cdef int rv
-        with nogil:
-            rv = libpq.PQisBusy(self._pgconn_ptr)
-        return rv
-
-    @property
-    def nonblocking(self) -> int:
-        return libpq.PQisnonblocking(self._pgconn_ptr)
-
-    @nonblocking.setter
-    def nonblocking(self, int arg) -> None:
-        if 0 > libpq.PQsetnonblocking(self._pgconn_ptr, arg):
-            raise e.OperationalError(f"setting nonblocking failed: {self.get_error_message()}")
-
-    cpdef int flush(self) except -1:
-        if self._pgconn_ptr == NULL:
-            raise e.OperationalError("flushing failed: the connection is closed")
-        cdef int rv = libpq.PQflush(self._pgconn_ptr)
-        if rv < 0:
-            raise e.OperationalError(f"flushing failed: {self.get_error_message()}")
-        return rv
-
-    def set_single_row_mode(self) -> None:
-        if not libpq.PQsetSingleRowMode(self._pgconn_ptr):
-            raise e.OperationalError("setting single row mode failed")
-
-    def set_chunked_rows_mode(self, size: int) -> None:
-        if not libpq.PQsetChunkedRowsMode(self._pgconn_ptr, size):
-            raise e.OperationalError("setting chunked rows mode failed")
-
-    def cancel_conn(self) -> PGcancelConn:
-        _check_supported("PQcancelCreate", 170000)
-        cdef libpq.PGcancelConn *ptr = libpq.PQcancelCreate(self._pgconn_ptr)
-        if not ptr:
-            raise e.OperationalError("couldn't create cancelConn object")
-        return PGcancelConn._from_ptr(ptr)
-
-    def get_cancel(self) -> PGcancel:
-        cdef libpq.PGcancel *ptr = libpq.PQgetCancel(self._pgconn_ptr)
-        if not ptr:
-            raise e.OperationalError("couldn't create cancel object")
-        return PGcancel._from_ptr(ptr)
-
-    cpdef object notifies(self):
-        cdef libpq.PGnotify *ptr
-        with nogil:
-            ptr = libpq.PQnotifies(self._pgconn_ptr)
-        if ptr:
-            ret = PGnotify(ptr.relname, <int>ptr.be_pid, ptr.extra)
-            libpq.PQfreemem(ptr)
-            return ret
-        else:
-            return None
-
-    def put_copy_data(self, buffer) -> int:
-        cdef int rv
-        cdef char *cbuffer
-        cdef Py_ssize_t length
-
-        _buffer_as_string_and_size(buffer, &cbuffer, &length)
-        rv = libpq.PQputCopyData(self._pgconn_ptr, cbuffer, length)
-        if rv < 0:
-            raise e.OperationalError(f"sending copy data failed: {self.get_error_message()}")
-        return rv
-
-    def put_copy_end(self, error: bytes | None = None) -> int:
-        cdef int rv
-        cdef const char *cerr = NULL
-        if error is not None:
-            cerr = PyBytes_AsString(error)
-        rv = libpq.PQputCopyEnd(self._pgconn_ptr, cerr)
-        if rv < 0:
-            raise e.OperationalError(f"sending copy end failed: {self.get_error_message()}")
-        return rv
-
-    def get_copy_data(self, int async_) -> tuple[int, memoryview]:
-        cdef char *buffer_ptr = NULL
-        cdef int nbytes
-        nbytes = libpq.PQgetCopyData(self._pgconn_ptr, &buffer_ptr, async_)
-        if nbytes == -2:
-            raise e.OperationalError(f"receiving copy data failed: {self.get_error_message()}")
-        if buffer_ptr is not NULL:
-            data = PyMemoryView_FromObject(
-                PQBuffer._from_buffer(<unsigned char *>buffer_ptr, nbytes))
-            return nbytes, data
-        else:
-            return nbytes, b""  # won't be parsed; doesn't need to be a memoryview
-
-    def trace(self, fileno: int) -> None:
-        if sys.platform != "linux":
-            raise e.NotSupportedError("currently only supported on Linux")
-        stream = fdopen(fileno, b"w")
-        libpq.PQtrace(self._pgconn_ptr, stream)
-
-    def set_trace_flags(self, flags: Trace) -> None:
-        _check_supported("PQsetTraceFlags", 140000)
-        libpq.PQsetTraceFlags(self._pgconn_ptr, flags)
-
-    def untrace(self) -> None:
-        libpq.PQuntrace(self._pgconn_ptr)
-
-    def encrypt_password(
-        self, const char *passwd, const char *user, algorithm = None
-    ) -> bytes:
-        _check_supported("PQencryptPasswordConn", 100000)
-
-        cdef char *out
-        cdef const char *calgo = NULL
-        if algorithm:
-            calgo = algorithm
-        out = libpq.PQencryptPasswordConn(self._pgconn_ptr, passwd, user, calgo)
-        if not out:
-            raise e.OperationalError(
-                f"password encryption failed: {self.get_error_message()}"
-            )
-
-        rv = bytes(out)
-        libpq.PQfreemem(out)
-        return rv
-
-    def change_password(
-        self, const char *user, const char *passwd
-    ) -> None:
-        _check_supported("PQchangePassword", 170000)
-
-        cdef libpq.PGresult *res
-        res = libpq.PQchangePassword(self._pgconn_ptr, user, passwd)
-        if libpq.PQresultStatus(res) != ExecStatus.COMMAND_OK:
-            raise e.OperationalError(
-                f"changing password failed: {self.get_error_message()}"
-            )
-
-    def make_empty_result(self, int exec_status) -> PGresult:
-        cdef libpq.PGresult *rv = libpq.PQmakeEmptyPGresult(
-            self._pgconn_ptr, exec_status)
-        if not rv:
-            raise MemoryError("couldn't allocate empty PGresult")
-        return PGresult._from_ptr(rv)
-
-    @property
-    def pipeline_status(self) -> int:
-        """The current pipeline mode status.
-
-        For libpq < 14.0, always return 0 (PQ_PIPELINE_OFF).
-        """
-        if libpq.PG_VERSION_NUM < 140000:
-            return libpq.PQ_PIPELINE_OFF
-        cdef int status = libpq.PQpipelineStatus(self._pgconn_ptr)
-        return status
-
-    def enter_pipeline_mode(self) -> None:
-        """Enter pipeline mode.
-
-        :raises ~e.OperationalError: in case of failure to enter the pipeline
-            mode.
-        """
-        _check_supported("PQenterPipelineMode", 140000)
-        if libpq.PQenterPipelineMode(self._pgconn_ptr) != 1:
-            raise e.OperationalError("failed to enter pipeline mode")
-
-    def exit_pipeline_mode(self) -> None:
-        """Exit pipeline mode.
-
-        :raises ~e.OperationalError: in case of failure to exit the pipeline
-            mode.
-        """
-        _check_supported("PQexitPipelineMode", 140000)
-        if libpq.PQexitPipelineMode(self._pgconn_ptr) != 1:
-            raise e.OperationalError(self.get_error_message())
-
-    def pipeline_sync(self) -> None:
-        """Mark a synchronization point in a pipeline.
-
-        :raises ~e.OperationalError: if the connection is not in pipeline mode
-            or if sync failed.
-        """
-        _check_supported("PQpipelineSync", 140000)
-        rv = libpq.PQpipelineSync(self._pgconn_ptr)
-        if rv == 0:
-            raise e.OperationalError("connection not in pipeline mode")
-        if rv != 1:
-            raise e.OperationalError("failed to sync pipeline")
-
-    def send_flush_request(self) -> None:
-        """Send a request for the server to flush its output buffer.
-
-        :raises ~e.OperationalError: if the flush request failed.
-        """
-        _check_supported("PQsendFlushRequest", 140000)
-        cdef int rv = libpq.PQsendFlushRequest(self._pgconn_ptr)
-        if rv == 0:
-            raise e.OperationalError(f"flush request failed: {self.get_error_message()}")
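
The pipeline methods above wrap libpq's PQpipeline* API one-to-one. A minimal sketch of how they combine, using psycopg's Python-level pq wrapper (which exposes the same interface as this class); the DSN is hypothetical and a libpq >= 14 is assumed:

    from psycopg import pq

    conn = pq.PGconn.connect(b"dbname=test")           # hypothetical DSN
    conn.enter_pipeline_mode()
    conn.send_query_params(b"SELECT $1::int", [b"1"])
    conn.send_query_params(b"SELECT $1::int", [b"2"])
    conn.pipeline_sync()                               # server replies from here
    # Each queued query yields one PGresult followed by a None separator;
    # the sync point itself arrives as a PIPELINE_SYNC result.
    while (res := conn.get_result()) is None or res.status != pq.ExecStatus.PIPELINE_SYNC:
        if res is not None:
            print(res.status, res.get_value(0, 0))
    conn.exit_pipeline_mode()
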
- """ - if not _ensure_pgconn(pgconn): - return -2 - return func(pgconn._pgconn_ptr) - - -cdef void notice_receiver(void *arg, const libpq.PGresult *res_ptr) noexcept with gil: - cdef PGconn pgconn = arg - if pgconn.notice_handler is None: - return - - cdef PGresult res = PGresult._from_ptr(res_ptr) - try: - pgconn.notice_handler(res) - except Exception as e: - logger.exception("error in notice receiver: %s", e) - finally: - res._pgresult_ptr = NULL # avoid destroying the pgresult_ptr - - -cdef (Py_ssize_t, libpq.Oid *, char * const*, int *, int *) _query_params_args( - list param_values: Sequence[bytes | None] | None, - param_types: Sequence[int] | None, - list param_formats: Sequence[int] | None, -) except *: - cdef int i - - # the PostgresQuery converts the param_types to tuple, so this operation - # is most often no-op - cdef tuple tparam_types - if param_types is not None and not isinstance(param_types, tuple): - tparam_types = tuple(param_types) - else: - tparam_types = param_types - - cdef Py_ssize_t nparams = len(param_values) if param_values else 0 - if tparam_types is not None and len(tparam_types) != nparams: - raise ValueError( - "got %d param_values but %d param_types" - % (nparams, len(tparam_types)) - ) - if param_formats is not None and len(param_formats) != nparams: - raise ValueError( - "got %d param_values but %d param_formats" - % (nparams, len(param_formats)) - ) - - cdef char **aparams = NULL - cdef int *alenghts = NULL - cdef char *ptr - cdef Py_ssize_t length - - if nparams: - aparams = PyMem_Malloc(nparams * sizeof(char *)) - alenghts = PyMem_Malloc(nparams * sizeof(int)) - for i in range(nparams): - obj = param_values[i] - if obj is None: - aparams[i] = NULL - alenghts[i] = 0 - else: - # TODO: it is a leak if this fails (but it should only fail - # on internal error, e.g. if obj is not a buffer) - _buffer_as_string_and_size(obj, &ptr, &length) - aparams[i] = ptr - alenghts[i] = length - - cdef libpq.Oid *atypes = NULL - if tparam_types: - atypes = PyMem_Malloc(nparams * sizeof(libpq.Oid)) - for i in range(nparams): - atypes[i] = tparam_types[i] - - cdef int *aformats = NULL - if param_formats is not None: - aformats = PyMem_Malloc(nparams * sizeof(int *)) - for i in range(nparams): - aformats[i] = param_formats[i] - - return (nparams, atypes, aparams, alenghts, aformats) - - -cdef void _clear_query_params( - libpq.Oid *ctypes, char *const *cvalues, int *clenghst, int *cformats -): - PyMem_Free(ctypes) - PyMem_Free(cvalues) - PyMem_Free(clenghst) - PyMem_Free(cformats) diff --git a/psycopg_c/psycopg_c/pq/pgresult.pyx b/psycopg_c/psycopg_c/pq/pgresult.pyx deleted file mode 100644 index 9547af93a..000000000 --- a/psycopg_c/psycopg_c/pq/pgresult.pyx +++ /dev/null @@ -1,160 +0,0 @@ -""" -psycopg_c.pq.PGresult object implementation. 
-""" - -# Copyright (C) 2020 The Psycopg Team - -cimport cython -from cpython.mem cimport PyMem_Free, PyMem_Malloc - -from psycopg.pq.misc import PGresAttDesc -from psycopg.pq._enums import ExecStatus - - -@cython.freelist(8) -cdef class PGresult: - def __cinit__(self): - self._pgresult_ptr = NULL - - @staticmethod - cdef PGresult _from_ptr(libpq.PGresult *ptr): - cdef PGresult rv = PGresult.__new__(PGresult) - rv._pgresult_ptr = ptr - return rv - - def __dealloc__(self) -> None: - self.clear() - - def __repr__(self) -> str: - cls = f"{self.__class__.__module__}.{self.__class__.__qualname__}" - status = ExecStatus(self.status) - return f"<{cls} [{status.name}] at 0x{id(self):x}>" - - def clear(self) -> None: - if self._pgresult_ptr is not NULL: - libpq.PQclear(self._pgresult_ptr) - self._pgresult_ptr = NULL - - @property - def pgresult_ptr(self) -> int | None: - if self._pgresult_ptr: - return self._pgresult_ptr - else: - return None - - @property - def status(self) -> int: - return libpq.PQresultStatus(self._pgresult_ptr) - - @property - def error_message(self) -> bytes: - return libpq.PQresultErrorMessage(self._pgresult_ptr) - - def get_error_message(self, encoding: str = "utf-8") -> str: - return _clean_error_message(self.error_message, encoding) - - def error_field(self, int fieldcode) -> bytes | None: - cdef char * rv = libpq.PQresultErrorField(self._pgresult_ptr, fieldcode) - if rv is not NULL: - return rv - else: - return None - - @property - def ntuples(self) -> int: - return libpq.PQntuples(self._pgresult_ptr) - - @property - def nfields(self) -> int: - return libpq.PQnfields(self._pgresult_ptr) - - def fname(self, int column_number) -> bytes | None: - cdef char *rv = libpq.PQfname(self._pgresult_ptr, column_number) - if rv is not NULL: - return rv - else: - return None - - def ftable(self, int column_number) -> int: - return libpq.PQftable(self._pgresult_ptr, column_number) - - def ftablecol(self, int column_number) -> int: - return libpq.PQftablecol(self._pgresult_ptr, column_number) - - def fformat(self, int column_number) -> int: - return libpq.PQfformat(self._pgresult_ptr, column_number) - - def ftype(self, int column_number) -> int: - return libpq.PQftype(self._pgresult_ptr, column_number) - - def fmod(self, int column_number) -> int: - return libpq.PQfmod(self._pgresult_ptr, column_number) - - def fsize(self, int column_number) -> int: - return libpq.PQfsize(self._pgresult_ptr, column_number) - - @property - def binary_tuples(self) -> int: - return libpq.PQbinaryTuples(self._pgresult_ptr) - - def get_value(self, int row_number, int column_number) -> bytes | None: - cdef int crow = row_number - cdef int ccol = column_number - cdef int length = libpq.PQgetlength(self._pgresult_ptr, crow, ccol) - cdef char *v - if length: - v = libpq.PQgetvalue(self._pgresult_ptr, crow, ccol) - # TODO: avoid copy - return v[:length] - else: - if libpq.PQgetisnull(self._pgresult_ptr, crow, ccol): - return None - else: - return b"" - - @property - def nparams(self) -> int: - return libpq.PQnparams(self._pgresult_ptr) - - def param_type(self, int param_number) -> int: - return libpq.PQparamtype(self._pgresult_ptr, param_number) - - @property - def command_status(self) -> bytes | None: - cdef char *rv = libpq.PQcmdStatus(self._pgresult_ptr) - if rv is not NULL: - return rv - else: - return None - - @property - def command_tuples(self) -> int | None: - cdef char *rv = libpq.PQcmdTuples(self._pgresult_ptr) - if rv is NULL: - return None - cdef bytes brv = rv - return int(brv) if brv else None - - 
@property - def oid_value(self) -> int: - return libpq.PQoidValue(self._pgresult_ptr) - - def set_attributes(self, descriptions: list[PGresAttDesc]): - cdef Py_ssize_t num = len(descriptions) - cdef libpq.PGresAttDesc *attrs = PyMem_Malloc( - num * sizeof(libpq.PGresAttDesc)) - - for i in range(num): - descr = descriptions[i] - attrs[i].name = descr.name - attrs[i].tableid = descr.tableid - attrs[i].columnid = descr.columnid - attrs[i].format = descr.format - attrs[i].typid = descr.typid - attrs[i].typlen = descr.typlen - attrs[i].atttypmod = descr.atttypmod - - cdef int res = libpq.PQsetResultAttrs(self._pgresult_ptr, num, attrs) - PyMem_Free(attrs) - if (res == 0): - raise e.OperationalError("PQsetResultAttrs failed") diff --git a/psycopg_c/psycopg_c/pq/pqbuffer.pyx b/psycopg_c/psycopg_c/pq/pqbuffer.pyx deleted file mode 100644 index 69118cb00..000000000 --- a/psycopg_c/psycopg_c/pq/pqbuffer.pyx +++ /dev/null @@ -1,111 +0,0 @@ -""" -PQbuffer object implementation. -""" - -# Copyright (C) 2020 The Psycopg Team - -cimport cython -from cpython.bytes cimport PyBytes_AsStringAndSize -from cpython.buffer cimport PyBUF_SIMPLE, PyBuffer_Release, PyObject_CheckBuffer -from cpython.buffer cimport PyObject_GetBuffer - - -@cython.freelist(32) -cdef class PQBuffer: - """ - Wrap a chunk of memory allocated by the libpq and expose it as memoryview. - """ - @staticmethod - cdef PQBuffer _from_buffer(unsigned char *buf, Py_ssize_t length): - cdef PQBuffer rv = PQBuffer.__new__(PQBuffer) - rv.buf = buf - rv.len = length - return rv - - def __cinit__(self): - self.buf = NULL - self.len = 0 - - def __dealloc__(self): - if self.buf: - libpq.PQfreemem(self.buf) - - def __repr__(self): - return ( - f"{self.__class__.__module__}.{self.__class__.__qualname__}" - f"({bytes(self)})" - ) - - def __getbuffer__(self, Py_buffer *buffer, int flags): - buffer.buf = self.buf - buffer.obj = self - buffer.len = self.len - buffer.itemsize = sizeof(unsigned char) - buffer.readonly = 1 - buffer.ndim = 1 - buffer.format = NULL # unsigned char - buffer.shape = &self.len - buffer.strides = NULL - buffer.suboffsets = NULL - buffer.internal = NULL - - def __releasebuffer__(self, Py_buffer *buffer): - pass - - -@cython.freelist(32) -cdef class ViewBuffer: - """ - Wrap a chunk of memory owned by a different object. 
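
The accessors above map directly onto libpq's PQntuples/PQnfields/PQgetvalue family. A sketch of typical consumption through psycopg's Python-level pq wrapper (same interface; DSN hypothetical):

    from psycopg import pq

    conn = pq.PGconn.connect(b"dbname=test")                  # hypothetical DSN
    res = conn.exec_(b"SELECT 1 AS x, 'hi' AS y")
    assert res.status == pq.ExecStatus.TUPLES_OK, res.get_error_message()
    for row in range(res.ntuples):
        for col in range(res.nfields):
            print(res.fname(col), res.get_value(row, col))    # values are bytes
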
- """ - @staticmethod - cdef ViewBuffer _from_buffer( - object obj, unsigned char *buf, Py_ssize_t length - ): - cdef ViewBuffer rv = ViewBuffer.__new__(ViewBuffer) - rv.obj = obj - rv.buf = buf - rv.len = length - return rv - - def __cinit__(self): - self.buf = NULL - self.len = 0 - - def __repr__(self): - return ( - f"{self.__class__.__module__}.{self.__class__.__qualname__}" - f"({bytes(self)})" - ) - - def __getbuffer__(self, Py_buffer *buffer, int flags): - buffer.buf = self.buf - buffer.obj = self - buffer.len = self.len - buffer.itemsize = sizeof(unsigned char) - buffer.readonly = 1 - buffer.ndim = 1 - buffer.format = NULL # unsigned char - buffer.shape = &self.len - buffer.strides = NULL - buffer.suboffsets = NULL - buffer.internal = NULL - - def __releasebuffer__(self, Py_buffer *buffer): - pass - - -cdef int _buffer_as_string_and_size( - data: "Buffer", char **ptr, Py_ssize_t *length -) except -1: - cdef Py_buffer buf - - if isinstance(data, bytes): - PyBytes_AsStringAndSize(data, ptr, length) - elif PyObject_CheckBuffer(data): - PyObject_GetBuffer(data, &buf, PyBUF_SIMPLE) - ptr[0] = buf.buf - length[0] = buf.len - PyBuffer_Release(&buf) - else: - raise TypeError(f"bytes or buffer expected, got {type(data)}") diff --git a/psycopg_c/psycopg_c/types/array.pyx b/psycopg_c/psycopg_c/types/array.pyx deleted file mode 100644 index a67cc1d3d..000000000 --- a/psycopg_c/psycopg_c/types/array.pyx +++ /dev/null @@ -1,286 +0,0 @@ -""" -C optimized functions to manipulate arrays -""" - -# Copyright (C) 2022 The Psycopg Team - -import cython - -from cpython.mem cimport PyMem_Free, PyMem_Realloc -from cpython.ref cimport Py_INCREF -from libc.stdint cimport int32_t, uint32_t -from libc.string cimport memset, strchr -from cpython.list cimport PyList_Append, PyList_GET_ITEM, PyList_GET_SIZE -from cpython.list cimport PyList_GetSlice, PyList_New, PyList_SET_ITEM -from cpython.object cimport PyObject - -from psycopg_c.pq cimport _buffer_as_string_and_size -from psycopg_c._psycopg cimport endian -from psycopg_c.pq.libpq cimport Oid - -from psycopg import errors as e - - -cdef extern from *: - """ -/* Defined in PostgreSQL in src/include/utils/array.h */ -#define MAXDIM 6 - """ - const int MAXDIM - - -cdef class ArrayLoader(_CRecursiveLoader): - - format = PQ_TEXT - base_oid = 0 - delimiter = b"," - - cdef PyObject *row_loader - cdef char cdelim - - # A memory area which used to unescape elements. - # Keep it here to avoid a malloc per element and to set up exceptions - # to make sure to free it on error. 
diff --git a/psycopg_c/psycopg_c/types/array.pyx b/psycopg_c/psycopg_c/types/array.pyx
deleted file mode 100644
index a67cc1d3d..000000000
--- a/psycopg_c/psycopg_c/types/array.pyx
+++ /dev/null
@@ -1,286 +0,0 @@
-"""
-C optimized functions to manipulate arrays
-"""
-
-# Copyright (C) 2022 The Psycopg Team
-
-import cython
-
-from cpython.mem cimport PyMem_Free, PyMem_Realloc
-from cpython.ref cimport Py_INCREF
-from libc.stdint cimport int32_t, uint32_t
-from libc.string cimport memset, strchr
-from cpython.list cimport PyList_Append, PyList_GET_ITEM, PyList_GET_SIZE
-from cpython.list cimport PyList_GetSlice, PyList_New, PyList_SET_ITEM
-from cpython.object cimport PyObject
-
-from psycopg_c.pq cimport _buffer_as_string_and_size
-from psycopg_c._psycopg cimport endian
-from psycopg_c.pq.libpq cimport Oid
-
-from psycopg import errors as e
-
-
-cdef extern from *:
-    """
-/* Defined in PostgreSQL in src/include/utils/array.h */
-#define MAXDIM 6
-    """
-    const int MAXDIM
-
-
-cdef class ArrayLoader(_CRecursiveLoader):
-
-    format = PQ_TEXT
-    base_oid = 0
-    delimiter = b","
-
-    cdef PyObject *row_loader
-    cdef char cdelim
-
-    # A memory area used to unescape elements. Kept on the instance to avoid
-    # a malloc per element and to make sure it is freed if loading fails.
-    cdef char *scratch
-    cdef size_t sclen
-
-    cdef object cload(self, const char *data, size_t length):
-        if self.cdelim == b"\x00":
-            self.row_loader = <PyObject *>self._tx._c_get_loader(
-                self.base_oid, PQ_TEXT)
-            self.cdelim = self.delimiter[0]
-
-        return _array_load_text(
-            data, length, self.row_loader, self.cdelim,
-            &(self.scratch), &(self.sclen))
-
-    def __dealloc__(self):
-        PyMem_Free(self.scratch)
-
-
-@cython.final
-cdef class ArrayBinaryLoader(_CRecursiveLoader):
-
-    format = PQ_BINARY
-
-    cdef PyObject *row_loader
-
-    cdef object cload(self, const char *data, size_t length):
-        rv = _array_load_binary(data, length, self._tx, &(self.row_loader))
-        return rv
-
-
-cdef object _array_load_text(
-    const char *buf, size_t length, PyObject *row_loader, char cdelim,
-    char **scratch, size_t *sclen
-):
-    if length == 0:
-        raise e.DataError("malformed array: empty data")
-
-    cdef const char *end = buf + length
-
-    # Remove the dimensions information prefix (``[...]=``)
-    if buf[0] == b"[":
-        buf = strchr(buf + 1, b'=')
-        if buf == NULL:
-            raise e.DataError("malformed array: no '=' after dimension information")
-        buf += 1
-
-    # TODO: further optimization: pre-scan the array to find the array
-    # dimensions, so that we can preallocate the list sized instead of calling
-    # append, which is the dominating operation
-
-    cdef list stack = []
-    cdef list a = []
-    rv = a
-    cdef PyObject *tmp
-
-    cdef CLoader cloader = None
-    cdef object pyload = None
-    if (<RowLoader>row_loader).cloader is not None:
-        cloader = (<RowLoader>row_loader).cloader
-    else:
-        pyload = (<RowLoader>row_loader).loadfunc
-
-    while buf < end:
-        if buf[0] == b'{':
-            if stack:
-                tmp = PyList_GET_ITEM(stack, PyList_GET_SIZE(stack) - 1)
-                PyList_Append(<object>tmp, a)
-            PyList_Append(stack, a)
-            a = []
-            buf += 1
-
-        elif buf[0] == b'}':
-            if not stack:
-                raise e.DataError("malformed array: unexpected '}'")
-            rv = stack.pop()
-            buf += 1
-
-        elif buf[0] == cdelim:
-            buf += 1
-
-        else:
-            v = _parse_token(
-                &buf, end, cdelim, scratch, sclen, cloader, pyload)
-            if not stack:
-                raise e.DataError("malformed array: missing initial '{'")
-            tmp = PyList_GET_ITEM(stack, PyList_GET_SIZE(stack) - 1)
-            PyList_Append(<object>tmp, v)
-
-    return rv
-
-
-cdef object _parse_token(
-    const char **bufptr, const char *bufend, char cdelim,
-    char **scratch, size_t *sclen, CLoader cloader, object load
-):
-    cdef const char *start = bufptr[0]
-    cdef int has_quotes = start[0] == b'"'
-    cdef int quoted = has_quotes
-    cdef int num_escapes = 0
-    cdef int escaped = 0
-
-    if has_quotes:
-        start += 1
-    cdef const char *end = start
-
-    while end < bufend:
-        if (end[0] == cdelim or end[0] == b'}') and not quoted:
-            break
-        elif end[0] == b'\\' and not escaped:
-            num_escapes += 1
-            escaped = 1
-            end += 1
-            continue
-        elif end[0] == b'"' and not escaped:
-            quoted = 0
-
-        escaped = 0
-        end += 1
-    else:
-        raise e.DataError("malformed array: hit the end of the buffer")
-
-    # Return the new position for the buffer
-    bufptr[0] = end
-    if has_quotes:
-        end -= 1
-
-    cdef Py_ssize_t length = (end - start)
-    if length == 4 and not has_quotes \
-            and start[0] == b'N' and start[1] == b'U' \
-            and start[2] == b'L' and start[3] == b'L':
-        return None
-
-    cdef const char *src
-    cdef char *tgt
-    cdef size_t unesclen
-
-    if not num_escapes:
-        if cloader is not None:
-            return cloader.cload(start, length)
-        else:
-            b = start[:length]
-            return load(b)
-
-    else:
-        unesclen = length - num_escapes + 1
-        if unesclen > sclen[0]:
-            scratch[0] = PyMem_Realloc(scratch[0], unesclen)
-            sclen[0] = unesclen
-
-        src = start
-        tgt = scratch[0]
-        while src < end:
-            if src[0] == b'\\':
-                src += 1
-            tgt[0] = src[0]
-            src += 1
-            tgt += 1
-
-        tgt[0] = b'\x00'
-
-        if cloader is not None:
-            return cloader.cload(scratch[0], length - num_escapes)
-        else:
-            b = scratch[0][:length - num_escapes]
-            return load(b)
-
-
-@cython.cdivision(True)
-cdef object _array_load_binary(
-    const char *buf, size_t length, Transformer tx, PyObject **row_loader_ptr
-):
-    # head is ndims, hasnull, elem oid
-    cdef uint32_t buf32
-    memcpy(&buf32, buf, sizeof(buf32))
-    cdef int ndims = endian.be32toh(buf32)
-
-    if ndims <= 0:
-        return []
-    elif ndims > MAXDIM:
-        raise e.DataError(
-            r"unexpected number of dimensions %s exceeding the maximum allowed %s"
-            % (ndims, MAXDIM)
-        )
-
-    cdef object oid
-    cdef uint32_t beoid
-    if row_loader_ptr[0] == NULL:
-        memcpy(&beoid, buf + 2 * sizeof(uint32_t), sizeof(beoid))
-        oid = endian.be32toh(beoid)
-        row_loader_ptr[0] = <PyObject *>tx._c_get_loader(oid, PQ_BINARY)
-
-    cdef Py_ssize_t[MAXDIM] dims
-    cdef int i
-    cdef uint32_t bedata
-    cdef const char *dimptr = buf + 3 * sizeof(uint32_t)
-    for i in range(ndims):
-        # Every dimension is dim, lower bound
-        memcpy(&bedata, dimptr, sizeof(bedata))
-        dimptr += 2 * sizeof(uint32_t)
-        dims[i] = endian.be32toh(bedata)
-
-    buf += (3 + 2 * ndims) * sizeof(bedata)
-    out = _array_load_binary_rec(ndims, dims, &buf, row_loader_ptr[0])
-    return out
-
-
-cdef object _array_load_binary_rec(
-    Py_ssize_t ndims, Py_ssize_t *dims, const char **bufptr, PyObject *row_loader
-):
-    cdef const char *buf
-    cdef int i
-    cdef uint32_t besize
-    cdef int32_t size
-    cdef object val
-
-    cdef Py_ssize_t nelems = dims[0]
-    cdef list out = PyList_New(nelems)
-
-    if ndims == 1:
-        buf = bufptr[0]
-        for i in range(nelems):
-            memcpy(&besize, buf, sizeof(besize))
-            size = endian.be32toh(besize)
-            buf += sizeof(besize)
-            if size == -1:
-                val = None
-            else:
-                if (<RowLoader>row_loader).cloader is not None:
-                    val = (<RowLoader>row_loader).cloader.cload(buf, size)
-                else:
-                    val = (<RowLoader>row_loader).loadfunc(buf[:size])
-                buf += size
-
-            Py_INCREF(val)
-            PyList_SET_ITEM(out, i, val)
-
-        bufptr[0] = buf
-
    else:
-        for i in range(nelems):
-            val = _array_load_binary_rec(ndims - 1, dims + 1, bufptr, row_loader)
-            Py_INCREF(val)
-            PyList_SET_ITEM(out, i, val)
-
-    return out
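
A pure-Python rendering of the stack machine `_array_load_text` implements, covering braces, the delimiter, quoting and backslash escapes (simplified for illustration: no `[...]=` dimension prefix, elements kept as strings):

    def parse_array(s: str, delim: str = ",") -> list:
        # Stack-based parser mirroring the C loop: '{' pushes, '}' pops.
        stack, current, rv, i = [], None, None, 0
        while i < len(s):
            c = s[i]
            if c == "{":
                new = []
                if current is not None:
                    current.append(new)
                    stack.append(current)
                current = new
                i += 1
            elif c == "}":
                rv = current
                current = stack.pop() if stack else None
                i += 1
            elif c == delim:
                i += 1
            else:
                # Parse one token, honouring quotes and backslash escapes
                quoted = s[i] == '"'
                if quoted:
                    i += 1
                tok = []
                while i < len(s):
                    if not quoted and s[i] in (delim, "}"):
                        break
                    if quoted and s[i] == '"':
                        i += 1
                        break
                    if s[i] == "\\":
                        i += 1
                    tok.append(s[i])
                    i += 1
                tok = "".join(tok)
                current.append(None if tok == "NULL" and not quoted else tok)
        return rv

    assert parse_array('{1,NULL,"a\\"b"}') == ["1", None, 'a"b']
    assert parse_array("{{1,2},{3,4}}") == [["1", "2"], ["3", "4"]]
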
-""" - -# Copyright (C) 2020 The Psycopg Team - -cimport cython - - -@cython.final -cdef class BoolDumper(CDumper): - - format = PQ_TEXT - oid = oids.BOOL_OID - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - cdef char *buf = CDumper.ensure_size(rv, offset, 1) - - # Fast paths, just a pointer comparison - if obj is True: - buf[0] = b"t" - elif obj is False: - buf[0] = b"f" - elif obj: - buf[0] = b"t" - else: - buf[0] = b"f" - - return 1 - - def quote(self, obj: bool) -> Buffer | None: - if obj is True: - return b"true" - elif obj is False: - return b"false" - else: - return b"true" if obj else b"false" - - -@cython.final -cdef class BoolBinaryDumper(CDumper): - - format = PQ_BINARY - oid = oids.BOOL_OID - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - cdef char *buf = CDumper.ensure_size(rv, offset, 1) - - # Fast paths, just a pointer comparison - if obj is True: - buf[0] = b"\x01" - elif obj is False: - buf[0] = b"\x00" - elif obj: - buf[0] = b"\x01" - else: - buf[0] = b"\x00" - - return 1 - - -@cython.final -cdef class BoolLoader(CLoader): - - format = PQ_TEXT - - cdef object cload(self, const char *data, size_t length): - # this creates better C than `return data[0] == b't'` - return True if data[0] == b't' else False - - -@cython.final -cdef class BoolBinaryLoader(CLoader): - - format = PQ_BINARY - - cdef object cload(self, const char *data, size_t length): - return True if data[0] else False diff --git a/psycopg_c/psycopg_c/types/datetime.pyx b/psycopg_c/psycopg_c/types/datetime.pyx deleted file mode 100644 index 54355d222..000000000 --- a/psycopg_c/psycopg_c/types/datetime.pyx +++ /dev/null @@ -1,1176 +0,0 @@ -""" -Cython adapters for date/time types. -""" - -# Copyright (C) 2021 The Psycopg Team - -from cpython cimport datetime as cdt -from libc.stdint cimport int64_t -from libc.string cimport memset, strchr -from cpython.dict cimport PyDict_GetItem -from cpython.object cimport PyObject, PyObject_CallFunctionObjArgs - - -cdef extern from "Python.h": - const char *PyUnicode_AsUTF8AndSize(unicode obj, Py_ssize_t *size) except NULL - object PyTimeZone_FromOffset(object offset) - -cdef extern from *: - """ -/* Multipliers from fraction of seconds to microseconds */ -static int _uspad[] = {0, 100000, 10000, 1000, 100, 10, 1}; - """ - cdef int *_uspad - -from datetime import date, datetime, time, timedelta, timezone -from zoneinfo import ZoneInfo - -from psycopg_c._psycopg cimport endian - -from psycopg import errors as e - -# Initialise the datetime C API -cdt.import_datetime() - -cdef enum: - ORDER_YMD = 0 - ORDER_DMY = 1 - ORDER_MDY = 2 - ORDER_PGDM = 3 - ORDER_PGMD = 4 - -cdef enum: - INTERVALSTYLE_OTHERS = 0 - INTERVALSTYLE_SQL_STANDARD = 1 - INTERVALSTYLE_POSTGRES = 2 - -cdef enum: - PG_DATE_EPOCH_DAYS = 730120 # date(2000, 1, 1).toordinal() - PY_DATE_MIN_DAYS = 1 # date.min.toordinal() - -cdef object date_toordinal = date.toordinal -cdef object date_fromordinal = date.fromordinal -cdef object datetime_astimezone = datetime.astimezone -cdef object time_utcoffset = time.utcoffset -cdef object timedelta_total_seconds = timedelta.total_seconds -cdef object timezone_utc = timezone.utc -cdef object pg_datetime_epoch = datetime(2000, 1, 1) -cdef object pg_datetimetz_epoch = datetime(2000, 1, 1, tzinfo=timezone.utc) - -cdef object _month_abbr = { - n: i - for i, n in enumerate( - b"Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec".split(), 1 - ) -} - - -@cython.final -cdef class DateDumper(CDumper): - - format = PQ_TEXT - 
diff --git a/psycopg_c/psycopg_c/types/datetime.pyx b/psycopg_c/psycopg_c/types/datetime.pyx
deleted file mode 100644
index 54355d222..000000000
--- a/psycopg_c/psycopg_c/types/datetime.pyx
+++ /dev/null
@@ -1,1176 +0,0 @@
-"""
-Cython adapters for date/time types.
-"""
-
-# Copyright (C) 2021 The Psycopg Team
-
-from cpython cimport datetime as cdt
-from libc.stdint cimport int64_t
-from libc.string cimport memset, strchr
-from cpython.dict cimport PyDict_GetItem
-from cpython.object cimport PyObject, PyObject_CallFunctionObjArgs
-
-
-cdef extern from "Python.h":
-    const char *PyUnicode_AsUTF8AndSize(unicode obj, Py_ssize_t *size) except NULL
-    object PyTimeZone_FromOffset(object offset)
-
-cdef extern from *:
-    """
-/* Multipliers from fraction of seconds to microseconds */
-static int _uspad[] = {0, 100000, 10000, 1000, 100, 10, 1};
-    """
-    cdef int *_uspad
-
-from datetime import date, datetime, time, timedelta, timezone
-from zoneinfo import ZoneInfo
-
-from psycopg_c._psycopg cimport endian
-
-from psycopg import errors as e
-
-# Initialise the datetime C API
-cdt.import_datetime()
-
-cdef enum:
-    ORDER_YMD = 0
-    ORDER_DMY = 1
-    ORDER_MDY = 2
-    ORDER_PGDM = 3
-    ORDER_PGMD = 4
-
-cdef enum:
-    INTERVALSTYLE_OTHERS = 0
-    INTERVALSTYLE_SQL_STANDARD = 1
-    INTERVALSTYLE_POSTGRES = 2
-
-cdef enum:
-    PG_DATE_EPOCH_DAYS = 730120  # date(2000, 1, 1).toordinal()
-    PY_DATE_MIN_DAYS = 1  # date.min.toordinal()
-
-cdef object date_toordinal = date.toordinal
-cdef object date_fromordinal = date.fromordinal
-cdef object datetime_astimezone = datetime.astimezone
-cdef object time_utcoffset = time.utcoffset
-cdef object timedelta_total_seconds = timedelta.total_seconds
-cdef object timezone_utc = timezone.utc
-cdef object pg_datetime_epoch = datetime(2000, 1, 1)
-cdef object pg_datetimetz_epoch = datetime(2000, 1, 1, tzinfo=timezone.utc)
-
-cdef object _month_abbr = {
-    n: i
-    for i, n in enumerate(
-        b"Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec".split(), 1
-    )
-}
-
-
-@cython.final
-cdef class DateDumper(CDumper):
-
-    format = PQ_TEXT
-    oid = oids.DATE_OID
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        cdef Py_ssize_t size;
-        cdef const char *src
-
-        # NOTE: whatever the PostgreSQL DateStyle input format (DMY, MDY, YMD)
-        # the YYYY-MM-DD is always understood correctly.
-        cdef str s = str(obj)
-        src = PyUnicode_AsUTF8AndSize(s, &size)
-
-        cdef char *buf = CDumper.ensure_size(rv, offset, size)
-        memcpy(buf, src, size)
-        return size
-
-
-@cython.final
-cdef class DateBinaryDumper(CDumper):
-
-    format = PQ_BINARY
-    oid = oids.DATE_OID
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        cdef int32_t days = PyObject_CallFunctionObjArgs(
-            date_toordinal, <PyObject *>obj, NULL)
-        days -= PG_DATE_EPOCH_DAYS
-        cdef uint32_t bedays = endian.htobe32(days)
-        cdef uint32_t *buf = <uint32_t *>CDumper.ensure_size(rv, offset, sizeof(bedays))
-        memcpy(buf, &bedays, sizeof(bedays))
-        return sizeof(bedays)
-
-
-cdef class _BaseTimeDumper(CDumper):
-
-    cpdef get_key(self, obj, format):
-        # Use (cls,) to report the need to upgrade to a dumper for timetz (the
-        # Frankenstein of the data types).
-        if not obj.tzinfo:
-            return self.cls
-        else:
-            return (self.cls,)
-
-    cpdef upgrade(self, obj: time, format):
-        raise NotImplementedError
-
-    cdef object _get_offset(self, obj):
-        off = PyObject_CallFunctionObjArgs(time_utcoffset, <PyObject *>obj, NULL)
-        if off is None:
-            raise e.DataError(
-                f"cannot calculate the offset of tzinfo '{obj.tzinfo}' without a date"
-            )
-        return off
-
-
-cdef class _BaseTimeTextDumper(_BaseTimeDumper):
-
-    format = PQ_TEXT
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        cdef Py_ssize_t size;
-        cdef const char *src
-
-        cdef str s = str(obj)
-        src = PyUnicode_AsUTF8AndSize(s, &size)
-
-        cdef char *buf = CDumper.ensure_size(rv, offset, size)
-        memcpy(buf, src, size)
-        return size
-
-
-@cython.final
-cdef class TimeDumper(_BaseTimeTextDumper):
-
-    oid = oids.TIME_OID
-
-    cpdef upgrade(self, obj, format):
-        if not obj.tzinfo:
-            return self
-        else:
-            return TimeTzDumper(self.cls)
-
-
-@cython.final
-cdef class TimeTzDumper(_BaseTimeTextDumper):
-
-    oid = oids.TIMETZ_OID
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        self._get_offset(obj)
-        return _BaseTimeTextDumper.cdump(self, obj, rv, offset)
-
-
-@cython.final
-cdef class TimeBinaryDumper(_BaseTimeDumper):
-
-    format = PQ_BINARY
-    oid = oids.TIME_OID
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        cdef int64_t us = cdt.time_microsecond(obj) + 1000000 * (
-            cdt.time_second(obj)
-            + 60 * (cdt.time_minute(obj) + 60 * cdt.time_hour(obj))
-        )
-        cdef uint64_t beus = endian.htobe64(us)
-
-        cdef char *buf = CDumper.ensure_size(rv, offset, sizeof(beus))
-        memcpy(buf, &beus, sizeof(beus))
-        return sizeof(beus)
-
-    cpdef upgrade(self, obj, format):
-        if not obj.tzinfo:
-            return self
-        else:
-            return TimeTzBinaryDumper(self.cls)
-
-
-@cython.final
-cdef class TimeTzBinaryDumper(_BaseTimeDumper):
-
-    format = PQ_BINARY
-    oid = oids.TIMETZ_OID
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        cdef int64_t us = cdt.time_microsecond(obj) + 1_000_000 * (
-            cdt.time_second(obj)
-            + 60 * (cdt.time_minute(obj) + 60 * cdt.time_hour(obj))
-        )
-        cdef uint64_t beus = endian.htobe64(us)
-
-        off = self._get_offset(obj)
-        cdef int32_t offsec = int(PyObject_CallFunctionObjArgs(
-            timedelta_total_seconds, <PyObject *>off, NULL))
-        cdef uint32_t beoff = endian.htobe32(-offsec)
-
-        cdef char *buf = CDumper.ensure_size(rv, offset, sizeof(beus) + sizeof(beoff))
-        memcpy(buf, &beus, sizeof(beus))
-        memcpy(buf + sizeof(beus), &beoff, sizeof(beoff))
-        return sizeof(beus) + sizeof(beoff)
-
-
-cdef class _BaseDatetimeDumper(CDumper):
-
-    cpdef get_key(self, obj, format):
-        # Use (cls,) to report the need to upgrade (downgrade, actually) to a
-        # dumper for naive timestamp.
-        if obj.tzinfo:
-            return self.cls
-        else:
-            return (self.cls,)
-
-    cpdef upgrade(self, obj: time, format):
-        raise NotImplementedError
-
-
-cdef class _BaseDatetimeTextDumper(_BaseDatetimeDumper):
-
-    format = PQ_TEXT
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        cdef Py_ssize_t size;
-        cdef const char *src
-
-        # NOTE: whatever the PostgreSQL DateStyle input format (DMY, MDY, YMD)
-        # the YYYY-MM-DD is always understood correctly.
-        cdef str s = str(obj)
-        src = PyUnicode_AsUTF8AndSize(s, &size)
-
-        cdef char *buf = CDumper.ensure_size(rv, offset, size)
-        memcpy(buf, src, size)
-        return size
-
-
-@cython.final
-cdef class DatetimeDumper(_BaseDatetimeTextDumper):
-
-    oid = oids.TIMESTAMPTZ_OID
-
-    cpdef upgrade(self, obj, format):
-        if obj.tzinfo:
-            return self
-        else:
-            return DatetimeNoTzDumper(self.cls)
-
-
-@cython.final
-cdef class DatetimeNoTzDumper(_BaseDatetimeTextDumper):
-
-    oid = oids.TIMESTAMP_OID
-
-
-@cython.final
-cdef class DatetimeBinaryDumper(_BaseDatetimeDumper):
-
-    format = PQ_BINARY
-    oid = oids.TIMESTAMPTZ_OID
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        delta = obj - pg_datetimetz_epoch
-
-        cdef int64_t us = cdt.timedelta_microseconds(delta) + 1_000_000 * (
-            86_400 * cdt.timedelta_days(delta)
-            + cdt.timedelta_seconds(delta))
-        cdef uint64_t beus = endian.htobe64(us)
-
-        cdef char *buf = CDumper.ensure_size(rv, offset, sizeof(beus))
-        memcpy(buf, &beus, sizeof(beus))
-        return sizeof(beus)
-
-    cpdef upgrade(self, obj, format):
-        if obj.tzinfo:
-            return self
-        else:
-            return DatetimeNoTzBinaryDumper(self.cls)
-
-
-@cython.final
-cdef class DatetimeNoTzBinaryDumper(_BaseDatetimeDumper):
-
-    format = PQ_BINARY
-    oid = oids.TIMESTAMP_OID
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        delta = obj - pg_datetime_epoch
-
-        cdef int64_t us = cdt.timedelta_microseconds(delta) + 1_000_000 * (
-            86_400 * cdt.timedelta_days(delta)
-            + cdt.timedelta_seconds(delta))
-        cdef uint64_t beus = endian.htobe64(us)
-
-        cdef char *buf = CDumper.ensure_size(rv, offset, sizeof(beus))
-        memcpy(buf, &beus, sizeof(beus))
-        return sizeof(beus)
-
-
-@cython.final
-cdef class TimedeltaDumper(CDumper):
-
-    format = PQ_TEXT
-    oid = oids.INTERVAL_OID
-    cdef int _style
-
-    def __cinit__(self, cls, context: AdaptContext | None = None):
-
-        cdef const char *ds = _get_intervalstyle(self._pgconn)
-        if ds[0] == b's':  # sql_standard
-            self._style = INTERVALSTYLE_SQL_STANDARD
-        else:  # iso_8601, postgres, postgres_verbose
-            self._style = INTERVALSTYLE_OTHERS
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        cdef Py_ssize_t size;
-        cdef const char *src
-
-        cdef str s
-        if self._style == INTERVALSTYLE_OTHERS:
-            # The comma is parsed ok by PostgreSQL but it's not documented
-            # and it seems brittle to rely on it. CRDB doesn't consume it well.
-            s = str(obj).replace(",", "")
-        else:
-            # sql_standard format needs explicit signs
-            # otherwise -1 day 1 sec will mean -1 sec
-            s = "%+d day %+d second %+d microsecond" % (
-                obj.days, obj.seconds, obj.microseconds)
-
-        src = PyUnicode_AsUTF8AndSize(s, &size)
-
-        cdef char *buf = CDumper.ensure_size(rv, offset, size)
-        memcpy(buf, src, size)
-        return size
-
-
-@cython.final
-cdef class TimedeltaBinaryDumper(CDumper):
-
-    format = PQ_BINARY
-    oid = oids.INTERVAL_OID
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        cdef int64_t us = (
-            1_000_000 * cdt.timedelta_seconds(obj)
-            + cdt.timedelta_microseconds(obj))
-        cdef uint64_t beus = endian.htobe64(us)
-
-        cdef int32_t days = cdt.timedelta_days(obj)
-        cdef uint32_t bedays = endian.htobe32(days)
-
-        # The third item is months
-        cdef char *buf = CDumper.ensure_size(
-            rv, offset, sizeof(beus) + sizeof(bedays) + sizeof(int32_t))
-        memcpy(buf, &beus, sizeof(beus))
-        memcpy(buf + sizeof(beus), &bedays, sizeof(bedays))
-        memset(buf + sizeof(beus) + sizeof(bedays), 0, sizeof(int32_t))
-
-        return sizeof(beus) + sizeof(bedays) + sizeof(int32_t)
-
-
-@cython.final
-cdef class DateLoader(CLoader):
-
-    format = PQ_TEXT
-    cdef int _order
-
-    def __cinit__(self, oid: int, context: AdaptContext | None = None):
-
-        cdef const char *ds = _get_datestyle(self._pgconn)
-        if ds[0] == b'I':  # ISO
-            self._order = ORDER_YMD
-        elif ds[0] == b'G':  # German
-            self._order = ORDER_DMY
-        elif ds[0] == b'S':  # SQL, DMY / MDY
-            self._order = ORDER_DMY if ds[5] == b'D' else ORDER_MDY
-        elif ds[0] == b'P':  # Postgres, DMY / MDY
-            self._order = ORDER_DMY if ds[10] == b'D' else ORDER_MDY
-        else:
-            raise e.InterfaceError(f"unexpected DateStyle: {ds.decode('ascii')}")
-
-    cdef object _error_date(self, const char *data, str msg):
-        s = bytes(data).decode("utf8", "replace")
-        if s == "infinity" or len(s.split()[0]) > 10:
-            raise e.DataError(f"date too large (after year 10K): {s!r}") from None
-        elif s == "-infinity" or "BC" in s:
-            raise e.DataError(f"date too small (before year 1): {s!r}") from None
-        else:
-            raise e.DataError(f"can't parse date {s!r}: {msg}") from None
-
-    cdef object cload(self, const char *data, size_t length):
-        if length != 10:
-            self._error_date(data, "unexpected length")
-
-        cdef int64_t vals[3]
-        memset(vals, 0, sizeof(vals))
-
-        cdef const char *ptr
-        cdef const char *end = data + length
-        ptr = _parse_date_values(data, end, vals, ARRAYSIZE(vals))
-        if ptr == NULL:
-            s = bytes(data).decode("utf8", "replace")
-            raise e.DataError(f"can't parse date {s!r}")
-
-        try:
-            if self._order == ORDER_YMD:
-                return cdt.date_new(vals[0], vals[1], vals[2])
-            elif self._order == ORDER_DMY:
-                return cdt.date_new(vals[2], vals[1], vals[0])
-            else:
-                return cdt.date_new(vals[2], vals[0], vals[1])
-        except ValueError as ex:
-            self._error_date(data, str(ex))
-
-
-@cython.final
-cdef class DateBinaryLoader(CLoader):
-
-    format = PQ_BINARY
-
-    cdef object cload(self, const char *data, size_t length):
-        cdef uint32_t bedata
-        memcpy(&bedata, data, sizeof(bedata))
-        cdef int days = endian.be32toh(bedata)
-        cdef object pydays = days + PG_DATE_EPOCH_DAYS
-        try:
-            return PyObject_CallFunctionObjArgs(
-                date_fromordinal, <PyObject *>pydays, NULL)
-        except ValueError:
-            if days < PY_DATE_MIN_DAYS:
-                raise e.DataError("date too small (before year 1)") from None
-            else:
-                raise e.DataError("date too large (after year 10K)") from None
-
-
-@cython.final
-cdef class TimeLoader(CLoader):
-
-    format = PQ_TEXT
-
-    cdef object cload(self, const char *data, size_t length):
-
-        cdef int64_t vals[3]
-        memset(vals, 0, sizeof(vals))
-        cdef const char *ptr
-        cdef const char *end = data + length
-
-        # Parse the first 3 groups of digits
-        ptr = _parse_date_values(data, end, vals, ARRAYSIZE(vals))
-        if ptr == NULL:
-            s = bytes(data).decode("utf8", "replace")
-            raise e.DataError(f"can't parse time {s!r}")
-
-        # Parse the microseconds
-        cdef int us = 0
-        if ptr[0] == b".":
-            ptr = _parse_micros(ptr + 1, &us)
-
-        try:
-            return cdt.time_new(vals[0], vals[1], vals[2], us, None)
-        except ValueError as ex:
-            s = bytes(data).decode("utf8", "replace")
-            raise e.DataError(f"can't parse time {s!r}: {ex}") from None
-
-
-@cython.final
-cdef class TimeBinaryLoader(CLoader):
-
-    format = PQ_BINARY
-
-    cdef object cload(self, const char *data, size_t length):
-        cdef uint64_t bedata
-        memcpy(&bedata, data, sizeof(bedata))
-        cdef int64_t val = endian.be64toh(bedata)
-        cdef int h, m, s, us
-
-        with cython.cdivision(True):
-            us = val % 1_000_000
-            val //= 1_000_000
-
-            s = val % 60
-            val //= 60
-
-            m = val % 60
-            h = (val // 60)
-
-        try:
-            return cdt.time_new(h, m, s, us, None)
-        except ValueError:
-            raise e.DataError(
-                f"time not supported by Python: hour={h}"
-            ) from None
-
-
-@cython.final
-cdef class TimetzLoader(CLoader):
-
-    format = PQ_TEXT
-
-    cdef object cload(self, const char *data, size_t length):
-
-        cdef int64_t vals[3]
-        memset(vals, 0, sizeof(vals))
-        cdef const char *ptr
-        cdef const char *end = data + length
-
-        # Parse the first 3 groups of digits (time)
-        ptr = _parse_date_values(data, end, vals, ARRAYSIZE(vals))
-        if ptr == NULL:
-            s = bytes(data).decode("utf8", "replace")
-            raise e.DataError(f"can't parse timetz {s!r}")
-
-        # Parse the microseconds
-        cdef int us = 0
-        if ptr[0] == b".":
-            ptr = _parse_micros(ptr + 1, &us)
-
-        # Parse the timezone
-        cdef int offsecs = _parse_timezone_to_seconds(&ptr, end)
-        if ptr == NULL:
-            s = bytes(data).decode("utf8", "replace")
-            raise e.DataError(f"can't parse timetz {s!r}")
-
-        tz = _timezone_from_seconds(offsecs)
-        try:
-            return cdt.time_new(vals[0], vals[1], vals[2], us, tz)
-        except ValueError as ex:
-            s = bytes(data).decode("utf8", "replace")
-            raise e.DataError(f"can't parse timetz {s!r}: {ex}") from None
-
-
-@cython.final
-cdef class TimetzBinaryLoader(CLoader):
-
-    format = PQ_BINARY
-
-    cdef object cload(self, const char *data, size_t length):
-        cdef uint64_t beval
-        memcpy(&beval, data, sizeof(beval))
-        cdef int64_t val = endian.be64toh(beval)
-
-        cdef uint32_t beoff
-        memcpy(&beoff, data + sizeof(beval), sizeof(beoff))
-        cdef int32_t off = endian.be32toh(beoff)
-
-        cdef int h, m, s, us
-
-        with cython.cdivision(True):
-            us = val % 1_000_000
-            val //= 1_000_000
-
-            s = val % 60
-            val //= 60
-
-            m = val % 60
-            h = (val // 60)
-
-        tz = _timezone_from_seconds(-off)
-        try:
-            return cdt.time_new(h, m, s, us, tz)
-        except ValueError:
-            raise e.DataError(
-                f"time not supported by Python: hour={h}"
-            ) from None
-
-
-@cython.final
-cdef class TimestampLoader(CLoader):
-
-    format = PQ_TEXT
-    cdef int _order
-
-    def __cinit__(self, oid: int, context: AdaptContext | None = None):
-
-        cdef const char *ds = _get_datestyle(self._pgconn)
-        if ds[0] == b'I':  # ISO
-            self._order = ORDER_YMD
-        elif ds[0] == b'G':  # German
-            self._order = ORDER_DMY
-        elif ds[0] == b'S':  # SQL, DMY / MDY
-            self._order = ORDER_DMY if ds[5] == b'D' else ORDER_MDY
-        elif ds[0] == b'P':  # Postgres, DMY / MDY
-            self._order = ORDER_PGDM if ds[10] == b'D' else ORDER_PGMD
-        else:
-            raise e.InterfaceError(f"unexpected DateStyle: {ds.decode('ascii')}")
-
-    cdef object cload(self, const char *data, size_t length):
-        cdef const char *end = data + length
-        if end[-1] == b'C':  # ends with BC
-            raise _get_timestamp_load_error(self._pgconn, data) from None
-
-        if self._order == ORDER_PGDM or self._order == ORDER_PGMD:
-            return self._cload_pg(data, end)
-
-        cdef int64_t vals[6]
-        memset(vals, 0, sizeof(vals))
-        cdef const char *ptr
-
-        # Parse the first 6 groups of digits (date and time)
-        ptr = _parse_date_values(data, end, vals, ARRAYSIZE(vals))
-        if ptr == NULL:
-            raise _get_timestamp_load_error(self._pgconn, data) from None
-
-        # Parse the microseconds
-        cdef int us = 0
-        if ptr[0] == b".":
-            ptr = _parse_micros(ptr + 1, &us)
-
-        # Resolve the YMD order
-        cdef int y, m, d
-        if self._order == ORDER_YMD:
-            y, m, d = vals[0], vals[1], vals[2]
-        elif self._order == ORDER_DMY:
-            d, m, y = vals[0], vals[1], vals[2]
-        else:  # self._order == ORDER_MDY
-            m, d, y = vals[0], vals[1], vals[2]
-
-        try:
-            return cdt.datetime_new(
-                y, m, d, vals[3], vals[4], vals[5], us, None)
-        except ValueError as ex:
-            raise _get_timestamp_load_error(self._pgconn, data, ex) from None
-
-    cdef object _cload_pg(self, const char *data, const char *end):
-        cdef int64_t vals[4]
-        memset(vals, 0, sizeof(vals))
-        cdef const char *ptr
-
-        # Find Wed Jun 02 or Wed 02 Jun
-        cdef char *seps[3]
-        seps[0] = strchr(data, b' ')
-        seps[1] = strchr(seps[0] + 1, b' ') if seps[0] != NULL else NULL
-        seps[2] = strchr(seps[1] + 1, b' ') if seps[1] != NULL else NULL
-        if seps[2] == NULL:
-            raise _get_timestamp_load_error(self._pgconn, data) from None
-
-        # Parse the following 3 groups of digits (time)
-        ptr = _parse_date_values(seps[2] + 1, end, vals, 3)
-        if ptr == NULL:
-            raise _get_timestamp_load_error(self._pgconn, data) from None
-
-        # Parse the microseconds
-        cdef int us = 0
-        if ptr[0] == b".":
-            ptr = _parse_micros(ptr + 1, &us)
-
-        # Parse the year
-        ptr = _parse_date_values(ptr + 1, end, vals + 3, 1)
-        if ptr == NULL:
-            raise _get_timestamp_load_error(self._pgconn, data) from None
-
-        # Resolve the MD order
-        cdef int m, d
-        try:
-            if self._order == ORDER_PGDM:
-                d = int(seps[0][1 : seps[1] - seps[0]])
-                m = _month_abbr[seps[1][1 : seps[2] - seps[1]]]
-            else:  # self._order == ORDER_PGMD
-                m = _month_abbr[seps[0][1 : seps[1] - seps[0]]]
-                d = int(seps[1][1 : seps[2] - seps[1]])
-        except (KeyError, ValueError) as ex:
-            raise _get_timestamp_load_error(self._pgconn, data, ex) from None
-
-        try:
-            return cdt.datetime_new(
-                vals[3], m, d, vals[0], vals[1], vals[2], us, None)
-        except ValueError as ex:
-            raise _get_timestamp_load_error(self._pgconn, data, ex) from None
-
-
-@cython.final
-cdef class TimestampBinaryLoader(CLoader):
-
-    format = PQ_BINARY
-
-    cdef object cload(self, const char *data, size_t length):
-        cdef uint64_t beval
-        memcpy(&beval, data, sizeof(beval))
-        cdef int64_t val = endian.be64toh(beval)
-        cdef int64_t micros, secs, days
-
-        # Work only with positive values as the cdivision behaves differently
-        # with negative values, and cdivision=False adds overhead.
-        cdef int64_t aval = val if val >= 0 else -val
-
-        # Group the micros into bigger units or timedelta_new might overflow
-        with cython.cdivision(True):
-            secs = aval // 1_000_000
-            micros = aval % 1_000_000
-
-            days = secs // 86_400
-            secs %= 86_400
-
-        try:
-            delta = cdt.timedelta_new(days, secs, micros)
-            if val > 0:
-                return pg_datetime_epoch + delta
-            else:
-                return pg_datetime_epoch - delta
-
-        except OverflowError:
-            if val <= 0:
-                raise e.DataError("timestamp too small (before year 1)") from None
-            else:
-                raise e.DataError("timestamp too large (after year 10K)") from None
-
-
-cdef class _BaseTimestamptzLoader(CLoader):
-    cdef object _time_zone
-
-    def __cinit__(self, oid: int, context: AdaptContext | None = None):
-        self._time_zone = _timezone_from_connection(self._pgconn)
-
-
-@cython.final
-cdef class TimestamptzLoader(_BaseTimestamptzLoader):
-
-    format = PQ_TEXT
-    cdef int _order
-
-    def __cinit__(self, oid: int, context: AdaptContext | None = None):
-
-        cdef const char *ds = _get_datestyle(self._pgconn)
-        if ds[0] == b'I':  # ISO
-            self._order = ORDER_YMD
-        else:  # Not true, but any non-YMD will do.
-            self._order = ORDER_DMY
-
-    cdef object cload(self, const char *data, size_t length):
-        if self._order != ORDER_YMD:
-            return self._cload_notimpl(data, length)
-
-        cdef const char *end = data + length
-        if end[-1] == b'C':  # ends with BC
-            raise _get_timestamp_load_error(self._pgconn, data) from None
-
-        cdef int64_t vals[6]
-        memset(vals, 0, sizeof(vals))
-
-        # Parse the first 6 groups of digits (date and time)
-        cdef const char *ptr
-        ptr = _parse_date_values(data, end, vals, ARRAYSIZE(vals))
-        if ptr == NULL:
-            raise _get_timestamp_load_error(self._pgconn, data) from None
-
-        # Parse the microseconds
-        cdef int us = 0
-        if ptr[0] == b".":
-            ptr = _parse_micros(ptr + 1, &us)
-
-        # Resolve the YMD order
-        cdef int y, m, d
-        if self._order == ORDER_YMD:
-            y, m, d = vals[0], vals[1], vals[2]
-        elif self._order == ORDER_DMY:
-            d, m, y = vals[0], vals[1], vals[2]
-        else:  # self._order == ORDER_MDY
-            m, d, y = vals[0], vals[1], vals[2]
-
-        # Parse the timezone
-        cdef int offsecs = _parse_timezone_to_seconds(&ptr, end)
-        if ptr == NULL:
-            raise _get_timestamp_load_error(self._pgconn, data) from None
-
-        tzoff = cdt.timedelta_new(0, offsecs, 0)
-
-        # The return value is a datetime with the timezone of the connection
-        # (in order to be consistent with the binary loader, which is the only
-        # thing it can return). So create a temporary datetime object, in utc,
-        # shift it by the offset parsed from the timestamp, and then move it to
-        # the connection timezone.
-        dt = None
-        try:
-            dt = cdt.datetime_new(
-                y, m, d, vals[3], vals[4], vals[5], us, timezone_utc)
-            dt -= tzoff
-            return PyObject_CallFunctionObjArgs(datetime_astimezone,
-                <PyObject *>dt, <PyObject *>self._time_zone, NULL)
-        except OverflowError as ex:
-            # If we have created the temporary 'dt' it means that we have a
-            # datetime close to max, the shift pushed it past max, overflowing.
-            # In this case return the datetime in a fixed offset timezone.
-            if dt is not None:
-                return dt.replace(tzinfo=timezone(tzoff))
-            else:
-                ex1 = ex
-        except ValueError as ex:
-            ex1 = ex
-
-        raise _get_timestamp_load_error(self._pgconn, data, ex1) from None
-
-    cdef object _cload_notimpl(self, const char *data, size_t length):
-        s = bytes(data)[:length].decode("utf8", "replace")
-        ds = _get_datestyle(self._pgconn).decode()
-        raise NotImplementedError(
-            f"can't parse timestamptz with DateStyle {ds!r}: {s!r}"
-        )
-
-
-@cython.final
-cdef class TimestamptzBinaryLoader(_BaseTimestamptzLoader):
-
-    format = PQ_BINARY
-
-    cdef object cload(self, const char *data, size_t length):
-        cdef uint64_t bedata
-        memcpy(&bedata, data, sizeof(bedata))
-        cdef int64_t val = endian.be64toh(bedata)
-        cdef int64_t micros, secs, days
-
-        # Work only with positive values as the cdivision behaves differently
-        # with negative values, and cdivision=False adds overhead.
-        cdef int64_t aval = val if val >= 0 else -val
-
-        # Group the micros into bigger units or timedelta_new might overflow
-        with cython.cdivision(True):
-            secs = aval // 1_000_000
-            micros = aval % 1_000_000
-
-            days = secs // 86_400
-            secs %= 86_400
-
-        try:
-            delta = cdt.timedelta_new(days, secs, micros)
-            if val > 0:
-                dt = pg_datetimetz_epoch + delta
-            else:
-                dt = pg_datetimetz_epoch - delta
-            return PyObject_CallFunctionObjArgs(datetime_astimezone,
-                <PyObject *>dt, <PyObject *>self._time_zone, NULL)
-
-        except OverflowError:
-            # If we were asked about a timestamp which would overflow in UTC,
-            # but not in the desired timezone (e.g. datetime.max at Chicago
-            # timezone) we can still save the day by shifting the value by the
-            # timezone offset and then replacing the timezone.
-            if self._time_zone is not None:
-                utcoff = self._time_zone.utcoffset(
-                    datetime.min if val < 0 else datetime.max
-                )
-                if utcoff:
-                    usoff = 1_000_000 * int(utcoff.total_seconds())
-                    try:
-                        ts = pg_datetime_epoch + timedelta(
-                            microseconds=val + usoff
-                        )
-                    except OverflowError:
-                        pass  # will raise downstream
-                    else:
-                        return ts.replace(tzinfo=self._time_zone)
-
-            if val <= 0:
-                raise e.DataError(
-                    "timestamp too small (before year 1)"
-                ) from None
-            else:
-                raise e.DataError(
-                    "timestamp too large (after year 10K)"
-                ) from None
-
-
-@cython.final
-cdef class IntervalLoader(CLoader):
-
-    format = PQ_TEXT
-    cdef int _style
-
-    def __cinit__(self, oid: int, context: AdaptContext | None = None):
-
-        cdef const char *ds = _get_intervalstyle(self._pgconn)
-        if ds[0] == b'p' and ds[8] == 0:  # postgres
-            self._style = INTERVALSTYLE_POSTGRES
-        else:  # iso_8601, sql_standard, postgres_verbose
-            self._style = INTERVALSTYLE_OTHERS
-
-    cdef object cload(self, const char *data, size_t length):
-        if self._style == INTERVALSTYLE_OTHERS:
-            return self._cload_notimpl(data, length)
-
-        cdef int days = 0, us = 0
-        cdef int64_t secs = 0
-        cdef char sign
-        cdef int64_t val
-        cdef const char *ptr = data
-        cdef const char *sep
-        cdef const char *end = ptr + length
-
-        # If there are spaces, there is a [+|-]n [days|months|years]
-        while True:
-            if ptr[0] == b'-' or ptr[0] == b'+':
-                sign = ptr[0]
-                ptr += 1
-            else:
-                sign = 0
-
-            sep = strchr(ptr, b' ')
-            if sep == NULL or sep > end:
-                break
-
-            val = 0
-            ptr = _parse_date_values(ptr, end, &val, 1)
-            if ptr == NULL:
-                s = bytes(data).decode("utf8", "replace")
-                raise e.DataError(f"can't parse interval {s!r}")
-
-            if sign == b'-':
-                val = -val
-
-            if ptr[1] == b'y':
-                days += 365 * val
-            elif ptr[1] == b'm':
-                days += 30 * val
-            elif ptr[1] == b'd':
-                days += val
-            else:
-                s = bytes(data).decode("utf8", "replace")
-                raise e.DataError(f"can't parse interval {s!r}")
-
-            # Skip the date part word.
-            ptr = strchr(ptr + 1, b' ')
-            if ptr != NULL and ptr < end:
-                ptr += 1
-            else:
-                break
-
-        # Parse the time part. An eventual sign was already consumed in the loop
-        cdef int64_t vals[3]
-        memset(vals, 0, sizeof(vals))
-        if ptr != NULL:
-            ptr = _parse_date_values(ptr, end, vals, ARRAYSIZE(vals))
-            if ptr == NULL:
-                s = bytes(data).decode("utf8", "replace")
-                raise e.DataError(f"can't parse interval {s!r}")
-
-            secs = vals[2] + 60 * (vals[1] + 60 * vals[0])
-
-            if secs > 86_400:
-                days += secs // 86_400
-                secs %= 86_400
-
-            if ptr[0] == b'.':
-                ptr = _parse_micros(ptr + 1, &us)
-
-            if sign == b'-':
-                secs = -secs
-                us = -us
-
-        try:
-            return cdt.timedelta_new(days, secs, us)
-        except OverflowError as ex:
-            s = bytes(data).decode("utf8", "replace")
-            raise e.DataError(f"can't parse interval {s!r}: {ex}") from None
-
-    cdef object _cload_notimpl(self, const char *data, size_t length):
-        s = bytes(data).decode("utf8", "replace")
-        style = _get_intervalstyle(self._pgconn).decode()
-        raise NotImplementedError(
-            f"can't parse interval with IntervalStyle {style!r}: {s!r}"
-        )
-
-
-@cython.final
-cdef class IntervalBinaryLoader(CLoader):
-
-    format = PQ_BINARY
-
-    cdef object cload(self, const char *data, size_t length):
-        cdef int64_t beval
-        cdef int32_t bedm[2]
-        memcpy(&beval, data, sizeof(beval))
-        memcpy(bedm, data + sizeof(beval), sizeof(bedm))
-        cdef int64_t val = endian.be64toh(beval)
-        cdef int32_t days = endian.be32toh(bedm[0])
-        cdef int32_t months = endian.be32toh(bedm[1])
-
-        cdef int years
-        with cython.cdivision(True):
-            if months > 0:
-                years = months // 12
-                months %= 12
-                days += 30 * months + 365 * years
-            elif months < 0:
-                months = -months
-                years = months // 12
-                months %= 12
-                days -= 30 * months + 365 * years
-
-        # Work only with positive values as the cdivision behaves differently
-        # with negative values, and cdivision=False adds overhead.
-        cdef int64_t aval = val if val >= 0 else -val
-        cdef int64_t us, ussecs, usdays
-
-        # Group the micros into bigger units or timedelta_new might overflow
-        with cython.cdivision(True):
-            ussecs = (aval // 1_000_000)
-            us = aval % 1_000_000
-
-            usdays = ussecs // 86_400
-            ussecs %= 86_400
-
-        if val < 0:
-            ussecs = -ussecs
-            usdays = -usdays
-            us = -us
-
-        try:
-            return cdt.timedelta_new(days + usdays, ussecs, us)
-        except OverflowError as ex:
-            raise e.DataError(f"can't parse interval: {ex}")
-
-
-cdef const char *_parse_date_values(
-    const char *ptr, const char *end, int64_t *vals, int nvals
-):
-    """
-    Parse *nvals* numeric values separated by non-numeric chars.
-
-    Write the result in the *vals* array (assumed zeroed) starting from *ptr*.
-
-    Return the pointer at the separator after the final digit.
-    """
-    cdef int ival = 0
-    while ptr < end:
-        if b'0' <= ptr[0] <= b'9':
-            vals[ival] = vals[ival] * 10 + (ptr[0] - b'0')
-        else:
-            ival += 1
-            if ival >= nvals:
-                break
-
-        ptr += 1
-
-    return ptr
-
-
-cdef const char *_parse_micros(const char *start, int *us):
-    """
-    Parse microseconds from a string.
-
-    Micros are assumed up to 6 digit chars separated by a non-digit.
-
-    Return the pointer at the separator after the final digit.
-    """
-    cdef const char *ptr = start
-    while ptr[0]:
-        if b'0' <= ptr[0] <= b'9':
-            us[0] = us[0] * 10 + (ptr[0] - b'0')
-        else:
-            break
-
-        ptr += 1
-
-    # Pad the fraction of second to get micros
-    if us[0] and ptr - start < 6:
-        us[0] *= _uspad[ptr - start]
-
-    return ptr
-
-
-cdef int _parse_timezone_to_seconds(const char **bufptr, const char *end):
-    """
-    Parse a timezone from a string, return the offset in seconds.
-
-    Modify the buffer pointer to point at the first character after the
-    timezone parsed. In case of parse error make it NULL.
-    """
-    cdef const char *ptr = bufptr[0]
-    cdef char sgn = ptr[0]
-
-    # Parse at most three groups of digits
-    cdef int64_t vals[3]
-    memset(vals, 0, sizeof(vals))
-
-    ptr = _parse_date_values(ptr + 1, end, vals, ARRAYSIZE(vals))
-    bufptr[0] = ptr
-    if ptr == NULL:
-        return 0
-
-    cdef int off = 60 * (60 * vals[0] + vals[1]) + vals[2]
-    return -off if sgn == b"-" else off
-
-
-cdef object _timezone_from_seconds(int sec, __cache={}):
-    cdef object pysec = sec
-    cdef PyObject *ptr = PyDict_GetItem(__cache, pysec)
-    if ptr != NULL:
-        return <object>ptr
-
-    delta = cdt.timedelta_new(0, sec, 0)
-    tz = timezone(delta)
-    __cache[pysec] = tz
-    return tz
-
-
-cdef object _get_timestamp_load_error(
-    pq.PGconn pgconn, const char *data, ex: Exception | None = None
-):
-    s = bytes(data).decode("utf8", "replace")
-
-    def is_overflow(s):
-        if not s:
-            return False
-
-        ds = _get_datestyle(pgconn)
-        if not ds.startswith(b"P"):  # Postgres
-            return len(s.split()[0]) > 10  # date is first token
-        else:
-            return len(s.split()[-1]) > 4  # year is last token
-
-    if s == "-infinity" or s.endswith("BC"):
-        return e.DataError(f"timestamp too small (before year 1): {s!r}")
-    elif s == "infinity" or is_overflow(s):
-        return e.DataError(f"timestamp too large (after year 10K): {s!r}")
-    else:
-        return e.DataError(f"can't parse timestamp {s!r}: {ex or '(unknown)'}")
-
-
-cdef _timezones = {}
-_timezones[None] = timezone_utc
-_timezones[b"UTC"] = timezone_utc
-
-
-cdef object _timezone_from_connection(pq.PGconn pgconn):
-    """Return the Python timezone info of the connection's timezone."""
-    if pgconn is None:
-        return timezone_utc
-
-    cdef bytes tzname = libpq.PQparameterStatus(pgconn._pgconn_ptr, b"TimeZone")
-    cdef PyObject *ptr = PyDict_GetItem(_timezones, tzname)
-    if ptr != NULL:
-        return <object>ptr
-
-    sname = tzname.decode() if tzname else "UTC"
-    try:
-        zi = ZoneInfo(sname)
-    except (KeyError, OSError):
-        logger.warning(
-            "unknown PostgreSQL timezone: %r; will use UTC", sname
-        )
-        zi = timezone_utc
-    except Exception as ex:
-        logger.warning(
-            "error handling PostgreSQL timezone: %r; will use UTC (%s - %s)",
-            sname,
-            type(ex).__name__,
-            ex,
-        )
-        zi = timezone.utc
-
-    _timezones[tzname] = zi
-    return zi
-
-
-cdef const char *_get_datestyle(pq.PGconn pgconn):
-    cdef const char *ds
-    if pgconn is not None:
-        ds = libpq.PQparameterStatus(pgconn._pgconn_ptr, b"DateStyle")
-        if ds is not NULL and ds[0]:
-            return ds
-
-    return b"ISO, DMY"
-
-
-cdef const char *_get_intervalstyle(pq.PGconn pgconn):
-    cdef const char *ds
-    if pgconn is not None:
-        ds = libpq.PQparameterStatus(pgconn._pgconn_ptr, b"IntervalStyle")
-        if ds is not NULL and ds[0]:
-            return ds
-
-    return b"postgres"
-""" - -# Copyright (C) 2020 The Psycopg Team - -cimport cython -from cpython.mem cimport PyMem_Free -from libc.stdint cimport * -from libc.string cimport memcpy, memset, strlen -from cpython.dict cimport PyDict_GetItem, PyDict_SetItem -from cpython.long cimport PyLong_AsLongLong, PyLong_FromLong, PyLong_FromLongLong -from cpython.long cimport PyLong_FromString, PyLong_FromUnsignedLong -from cpython.bytes cimport PyBytes_AsStringAndSize -from cpython.float cimport PyFloat_AsDouble, PyFloat_FromDouble -from cpython.unicode cimport PyUnicode_DecodeUTF8 - -import sys -from decimal import Context, Decimal, DefaultContext - -from psycopg_c._psycopg cimport endian - -from psycopg import errors as e -from psycopg._wrappers import Int2, Int4, Int8, IntNumeric - - -cdef extern from "Python.h": - # work around https://github.com/cython/cython/issues/3909 - double PyOS_string_to_double( - const char *s, char **endptr, PyObject *overflow_exception) except? -1.0 - char *PyOS_double_to_string( - double val, char format_code, int precision, int flags, int *ptype - ) except NULL - int Py_DTSF_ADD_DOT_0 - long long PyLong_AsLongLongAndOverflow(object pylong, int *overflow) except? -1 - - # Missing in cpython/unicode.pxd - const char *PyUnicode_AsUTF8(object unicode) except NULL - - -# defined in numutils.c -cdef extern from *: - """ -int pg_lltoa(int64_t value, char *a); -#define MAXINT8LEN 20 - """ - int pg_lltoa(int64_t value, char *a) - const int MAXINT8LEN - - -cdef class _IntDumper(CDumper): - - format = PQ_TEXT - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - return dump_int_to_text(obj, rv, offset) - - def quote(self, obj) -> Buffer | None: - cdef Py_ssize_t length - - rv = PyByteArray_FromStringAndSize("", 0) - if obj >= 0: - length = self.cdump(obj, rv, 0) - else: - PyByteArray_Resize(rv, 23) - rv[0] = b' ' - length = 1 + self.cdump(obj, rv, 1) - - PyByteArray_Resize(rv, length) - return rv - - -cdef class _IntOrSubclassDumper(_IntDumper): - - format = PQ_TEXT - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - return dump_int_or_sub_to_text(obj, rv, offset) - - -@cython.final -cdef class Int2Dumper(_IntOrSubclassDumper): - - oid = oids.INT2_OID - - -@cython.final -cdef class Int4Dumper(_IntOrSubclassDumper): - - oid = oids.INT4_OID - - -@cython.final -cdef class Int8Dumper(_IntOrSubclassDumper): - - oid = oids.INT8_OID - - -@cython.final -cdef class IntNumericDumper(_IntOrSubclassDumper): - - oid = oids.NUMERIC_OID - - -@cython.final -cdef class Int2BinaryDumper(CDumper): - - format = PQ_BINARY - oid = oids.INT2_OID - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - return dump_int_to_int2_binary(obj, rv, offset) - - -@cython.final -cdef class Int4BinaryDumper(CDumper): - - format = PQ_BINARY - oid = oids.INT4_OID - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - return dump_int_to_int4_binary(obj, rv, offset) - - -@cython.final -cdef class Int8BinaryDumper(CDumper): - - format = PQ_BINARY - oid = oids.INT8_OID - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - return dump_int_to_int8_binary(obj, rv, offset) - - -cdef extern from *: - """ -/* Ratio between number of bits required to store a number and number of pg - * decimal digits required (log(2) / log(10_000)). 
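 * Sanity check of the constant: log(2) / log(10000) = 0.30103 / 4
 * = 0.0752575. E.g. a 64-bit value has at most 20 decimal digits,
 * i.e. ceil(20 / 4) = 5 base-10000 digits, and indeed
 * int(64 * BIT_PER_PGDIGIT) + 1 = 5 as computed at the call site.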
- */ -#define BIT_PER_PGDIGIT 0.07525749891599529 - -/* decimal digits per Postgres "digit" */ -#define DEC_DIGITS 4 - -#define NUMERIC_POS 0x0000 -#define NUMERIC_NEG 0x4000 -#define NUMERIC_NAN 0xC000 -#define NUMERIC_PINF 0xD000 -#define NUMERIC_NINF 0xF000 -""" - const double BIT_PER_PGDIGIT - const int DEC_DIGITS - const int NUMERIC_POS - const int NUMERIC_NEG - const int NUMERIC_NAN - const int NUMERIC_PINF - const int NUMERIC_NINF - - -@cython.final -cdef class IntNumericBinaryDumper(CDumper): - - format = PQ_BINARY - oid = oids.NUMERIC_OID - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - return dump_int_to_numeric_binary(obj, rv, offset) - - -cdef class IntDumper(CDumper): - - format = PQ_TEXT - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - raise TypeError( - f"{type(self).__name__} is a dispatcher to other dumpers:" - " dump() is not supposed to be called" - ) - - cpdef get_key(self, obj, format): - cdef long long val - cdef int overflow - - val = PyLong_AsLongLongAndOverflow(obj, &overflow) - if overflow: - return IntNumeric - - if INT32_MIN <= obj <= INT32_MAX: - if INT16_MIN <= obj <= INT16_MAX: - return Int2 - else: - return Int4 - else: - if INT64_MIN <= obj <= INT64_MAX: - return Int8 - else: - return IntNumeric - - _int2_dumper = Int2Dumper - _int4_dumper = Int4Dumper - _int8_dumper = Int8Dumper - _int_numeric_dumper = IntNumericDumper - - cpdef upgrade(self, obj, format): - cdef long long val - cdef int overflow - - val = PyLong_AsLongLongAndOverflow(obj, &overflow) - if overflow: - return self._int_numeric_dumper(IntNumeric) - - if INT32_MIN <= obj <= INT32_MAX: - if INT16_MIN <= obj <= INT16_MAX: - return self._int2_dumper(Int2) - else: - return self._int4_dumper(Int4) - else: - if INT64_MIN <= obj <= INT64_MAX: - return self._int8_dumper(Int8) - else: - return self._int_numeric_dumper(IntNumeric) - - -@cython.final -cdef class IntBinaryDumper(IntDumper): - - format = PQ_BINARY - - _int2_dumper = Int2BinaryDumper - _int4_dumper = Int4BinaryDumper - _int8_dumper = Int8BinaryDumper - _int_numeric_dumper = IntNumericBinaryDumper - - -@cython.final -cdef class IntLoader(CLoader): - - format = PQ_TEXT - - cdef object cload(self, const char *data, size_t length): - # if the number ends with a 0 we don't need a copy - if data[length] == b'\0': - return PyLong_FromString(data, NULL, 10) - - # Otherwise we have to copy it aside - if length > MAXINT8LEN: - raise ValueError("string too big for an int") - - cdef char[MAXINT8LEN + 1] buf - memcpy(buf, data, length) - buf[length] = 0 - return PyLong_FromString(buf, NULL, 10) - - - -@cython.final -cdef class Int2BinaryLoader(CLoader): - - format = PQ_BINARY - - cdef object cload(self, const char *data, size_t length): - cdef int16_t bedata - memcpy(&bedata, data, sizeof(bedata)) - return PyLong_FromLong(endian.be16toh(bedata)) - - -@cython.final -cdef class Int4BinaryLoader(CLoader): - - format = PQ_BINARY - - cdef object cload(self, const char *data, size_t length): - cdef int32_t bedata - memcpy(&bedata, data, sizeof(bedata)) - return PyLong_FromLong(endian.be32toh(bedata)) - - -@cython.final -cdef class Int8BinaryLoader(CLoader): - - format = PQ_BINARY - - cdef object cload(self, const char *data, size_t length): - cdef int64_t bedata - memcpy(&bedata, data, sizeof(bedata)) - return PyLong_FromLongLong(endian.be64toh(bedata)) - - -@cython.final -cdef class OidBinaryLoader(CLoader): - - format = PQ_BINARY - - cdef object cload(self, const char *data, size_t length): 
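        # Note: the fixed-width binary loaders in this file are byte-for-byte
        # equivalent to struct unpacking in pure Python; a hedged sketch
        # ("!" selects network/big-endian byte order, as used on the wire):
        #
        #     import struct
        #     struct.unpack("!h", b"\x00\x2a")[0]  # int2 -> 42
        #     struct.unpack("!i", data)[0]         # int4
        #     struct.unpack("!q", data)[0]         # int8
        #     struct.unpack("!I", data)[0]         # oid (unsigned)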
-        cdef uint32_t bedata
-        memcpy(&bedata, data, sizeof(bedata))
-        return PyLong_FromUnsignedLong(endian.be32toh(bedata))
-
-
-cdef class _FloatDumper(CDumper):
-
-    format = PQ_TEXT
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        cdef double d = PyFloat_AsDouble(obj)
-        cdef char *out = PyOS_double_to_string(
-            d, b'r', 0, Py_DTSF_ADD_DOT_0, NULL)
-        cdef Py_ssize_t length = strlen(out)
-        cdef char *tgt = CDumper.ensure_size(rv, offset, length)
-        memcpy(tgt, out, length)
-        PyMem_Free(out)
-        return length
-
-    def quote(self, obj) -> Buffer | None:
-        value = bytes(self.dump(obj))
-        cdef PyObject *ptr = PyDict_GetItem(_special_float, value)
-        if ptr != NULL:
-            return <object>ptr
-
-        return value if obj >= 0 else b" " + value
-
-cdef dict _special_float = {
-    b"inf": b"'Infinity'::float8",
-    b"-inf": b"'-Infinity'::float8",
-    b"nan": b"'NaN'::float8",
-}
-
-
-@cython.final
-cdef class FloatDumper(_FloatDumper):
-
-    oid = oids.FLOAT8_OID
-
-
-@cython.final
-cdef class Float4Dumper(_FloatDumper):
-
-    oid = oids.FLOAT4_OID
-
-
-@cython.final
-cdef class FloatBinaryDumper(CDumper):
-
-    format = PQ_BINARY
-    oid = oids.FLOAT8_OID
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        cdef double d = PyFloat_AsDouble(obj)
-        cdef uint64_t ival
-        memcpy(&ival, &d, sizeof(ival))
-        cdef uint64_t beval = endian.htobe64(ival)
-        cdef uint64_t *buf = <uint64_t *>CDumper.ensure_size(
-            rv, offset, sizeof(beval))
-        memcpy(buf, &beval, sizeof(beval))
-        return sizeof(beval)
-
-
-@cython.final
-cdef class Float4BinaryDumper(CDumper):
-
-    format = PQ_BINARY
-    oid = oids.FLOAT4_OID
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        cdef float f = PyFloat_AsDouble(obj)
-        cdef uint32_t ival
-        memcpy(&ival, &f, sizeof(ival))
-        cdef uint32_t beval = endian.htobe32(ival)
-        cdef uint32_t *buf = <uint32_t *>CDumper.ensure_size(
-            rv, offset, sizeof(beval))
-        memcpy(buf, &beval, sizeof(beval))
-        return sizeof(beval)
-
-
-@cython.final
-cdef class FloatLoader(CLoader):
-
-    format = PQ_TEXT
-
-    cdef object cload(self, const char *data, size_t length):
-        cdef char *endptr
-        cdef double d = PyOS_string_to_double(
-            data, &endptr, <PyObject *>OverflowError)
-        return PyFloat_FromDouble(d)
-
-
-@cython.final
-cdef class Float4BinaryLoader(CLoader):
-
-    format = PQ_BINARY
-
-    cdef object cload(self, const char *data, size_t length):
-        cdef uint32_t bedata
-        memcpy(&bedata, data, sizeof(bedata))
-        cdef uint32_t asint = endian.be32toh(bedata)
-        cdef float f
-        memcpy(&f, &asint, sizeof(asint))
-        return PyFloat_FromDouble(f)
-
-
-@cython.final
-cdef class Float8BinaryLoader(CLoader):
-
-    format = PQ_BINARY
-
-    cdef object cload(self, const char *data, size_t length):
-        cdef uint64_t bedata
-        memcpy(&bedata, data, sizeof(bedata))
-        cdef uint64_t asint = endian.be64toh(bedata)
-        cdef double d
-        memcpy(&d, &asint, sizeof(asint))
-        return PyFloat_FromDouble(d)
-
-
-@cython.final
-cdef class DecimalDumper(CDumper):
-
-    format = PQ_TEXT
-    oid = oids.NUMERIC_OID
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        return dump_decimal_to_text(obj, rv, offset)
-
-    def quote(self, obj) -> Buffer | None:
-        value = bytes(self.dump(obj))
-        cdef PyObject *ptr = PyDict_GetItem(_special_decimal, value)
-        if ptr != NULL:
-            return <object>ptr
-
-        return value if obj >= 0 else b" " + value
-
-cdef dict _special_decimal = {
-    b"Infinity": b"'Infinity'::numeric",
-    b"-Infinity": b"'-Infinity'::numeric",
-    b"NaN": b"'NaN'::numeric",
-}
-
-
-@cython.final
-cdef class NumericLoader(CLoader):
-
-    format = PQ_TEXT
-
-    cdef object cload(self, const char *data, size_t length):
-        s = PyUnicode_DecodeUTF8(data, length, NULL)
-        return Decimal(s)
-
-
-cdef dict _decimal_special = {
-    NUMERIC_NAN: Decimal("NaN"),
-    NUMERIC_PINF: Decimal("Infinity"),
-    NUMERIC_NINF: Decimal("-Infinity"),
-}
-
-cdef dict _contexts = {}
-for _i in range(DefaultContext.prec):
-    _contexts[_i] = DefaultContext
-
-
-@cython.final
-cdef class NumericBinaryLoader(CLoader):
-
-    format = PQ_BINARY
-
-    cdef object cload(self, const char *data, size_t length):
-
-        cdef uint16_t behead[4]
-        memcpy(&behead, data, sizeof(behead))
-        cdef uint16_t ndigits = endian.be16toh(behead[0])
-        cdef int16_t weight = endian.be16toh(behead[1])
-        cdef uint16_t sign = endian.be16toh(behead[2])
-        cdef uint16_t dscale = endian.be16toh(behead[3])
-
-        cdef int shift
-        cdef int i
-        cdef PyObject *pctx
-        cdef object key
-        cdef const char *digitptr
-        cdef uint16_t bedigit
-
-        if sign == NUMERIC_POS or sign == NUMERIC_NEG:
-            if length != (4 + ndigits) * sizeof(uint16_t):
-                raise e.DataError("bad ndigits in numeric binary representation")
-
-            val = 0
-            digitptr = data + sizeof(behead)
-            for i in range(ndigits):
-                memcpy(&bedigit, digitptr, sizeof(bedigit))
-                digitptr += sizeof(bedigit)
-                val *= 10_000
-                val += endian.be16toh(bedigit)
-
-            shift = dscale - (ndigits - weight - 1) * DEC_DIGITS
-
-            key = (weight + 2) * DEC_DIGITS + dscale
-            pctx = PyDict_GetItem(_contexts, key)
-            if pctx == NULL:
-                ctx = Context(prec=key)
-                PyDict_SetItem(_contexts, key, ctx)
-                pctx = <PyObject *>ctx
-
-            return (
-                Decimal(val if sign == NUMERIC_POS else -val)
-                .scaleb(-dscale, <object>pctx)
-                .shift(shift, <object>pctx)
-            )
-        else:
-            try:
-                return _decimal_special[sign]
-            except KeyError:
-                raise e.DataError(f"bad value for numeric sign: 0x{sign:X}")
-
-
-@cython.final
-cdef class DecimalBinaryDumper(CDumper):
-
-    format = PQ_BINARY
-    oid = oids.NUMERIC_OID
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        return dump_decimal_to_numeric_binary(obj, rv, offset)
-
-
-_int_classes = None
-
-
-cdef class _MixedNumericDumper(CDumper):
-
-    oid = oids.NUMERIC_OID
-
-    def __cinit__(self, cls, context: AdaptContext | None = None):
-        global _int_classes
-
-        if _int_classes is None:
-            if "numpy" in sys.modules:
-                import numpy
-                _int_classes = (int, numpy.integer)
-            else:
-                _int_classes = int
-
-
-@cython.final
-cdef class NumericDumper(_MixedNumericDumper):
-
-    format = PQ_TEXT
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        if type(obj) is int:  # fast path
-            return dump_int_to_text(obj, rv, offset)
-        elif isinstance(obj, Decimal):
-            return dump_decimal_to_text(obj, rv, offset)
-        elif isinstance(obj, _int_classes):
-            return dump_int_to_text(obj, rv, offset)
-        else:
-            raise TypeError(
-                f"class {type(self).__name__} cannot dump {type(obj).__name__}"
-            )
-
-
-@cython.final
-cdef class NumericBinaryDumper(_MixedNumericDumper):
-
-    format = PQ_BINARY
-
-    cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1:
-        if type(obj) is int:
-            return dump_int_to_numeric_binary(obj, rv, offset)
-        elif isinstance(obj, Decimal):
-            return dump_decimal_to_numeric_binary(obj, rv, offset)
-        elif isinstance(obj, _int_classes):
-            return dump_int_to_numeric_binary(int(obj), rv, offset)
-        else:
-            raise TypeError(
-                f"class {type(self).__name__} cannot dump {type(obj).__name__}"
-            )
-
-
-cdef Py_ssize_t dump_decimal_to_text(obj, bytearray rv, Py_ssize_t offset) except -1:
-    cdef char *src
-    cdef Py_ssize_t length
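    # Note (hedged sketch): str() of a Decimal is already the text that
    # PostgreSQL accepts, except that a signaling NaN must be downgraded to
    # a plain NaN, which is what the b's' check below implements:
    #
    #     from decimal import Decimal
    #     s = str(Decimal("sNaN"))            # 'sNaN'
    #     s = "NaN" if s.startswith("s") else s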
-    cdef char *buf
-
-    b = bytes(str(obj), "utf-8")
-    PyBytes_AsStringAndSize(b, &src, &length)
-
-    if src[0] != b's':
-        buf = CDumper.ensure_size(rv, offset, length)
-        memcpy(buf, src, length)
-
-    else:  # convert sNaN to NaN
-        length = 3  # NaN
-        buf = CDumper.ensure_size(rv, offset, length)
-        memcpy(buf, b"NaN", length)
-
-    return length
-
-
-cdef extern from *:
-    """
-/* Weights of py digits into a pg digit according to their positions. */
-static const int pydigit_weights[] = {1000, 100, 10, 1};
-"""
-    const int[4] pydigit_weights
-
-
-@cython.cdivision(True)
-cdef Py_ssize_t dump_decimal_to_numeric_binary(
-    obj, bytearray rv, Py_ssize_t offset
-) except -1:
-
-    # TODO: this implementation is about 30% slower than the text dump.
-    # It could probably be optimised by accessing the C structure of the
-    # Decimal object, if available, which would save the creation of several
-    # intermediate Python objects (the DecimalTuple, the digits tuple, and
-    # then accessing them).
-
-    cdef object t = obj.as_tuple()
-    cdef int sign = t[0]
-    cdef tuple digits = t[1]
-    cdef uint16_t *buf
-    cdef uint16_t behead[4]
-    cdef Py_ssize_t length
-
-    cdef object pyexp = t[2]
-    cdef const char *bexp
-
-    if not isinstance(pyexp, int):
-        # Handle inf, nan
-        buf = <uint16_t *>CDumper.ensure_size(rv, offset, sizeof(behead))
-        behead[0] = 0
-        behead[1] = 0
-        behead[3] = 0
-        bexp = PyUnicode_AsUTF8(pyexp)
-        if bexp[0] == b'n' or bexp[0] == b'N':
-            behead[2] = endian.htobe16(NUMERIC_NAN)
-        elif bexp[0] == b'F':
-            if sign:
-                behead[2] = endian.htobe16(NUMERIC_NINF)
-            else:
-                behead[2] = endian.htobe16(NUMERIC_PINF)
-        else:
-            raise e.DataError(f"unexpected decimal exponent: {pyexp}")
-        memcpy(buf, behead, sizeof(behead))
-        return sizeof(behead)
-
-    cdef int exp = pyexp
-    cdef uint16_t ndigits = len(digits)
-
-    # Find the last nonzero digit
-    cdef int nzdigits = ndigits
-    while nzdigits > 0 and digits[nzdigits - 1] == 0:
-        nzdigits -= 1
-
-    cdef uint16_t dscale
-    if exp <= 0:
-        dscale = -exp
-    else:
-        dscale = 0
-        # align the py digits to the pg digits if there's some py exponent
-        ndigits += exp % DEC_DIGITS
-
-    if nzdigits == 0:
-        buf = <uint16_t *>CDumper.ensure_size(rv, offset, sizeof(behead))
-        behead[0] = 0  # ndigits
-        behead[1] = 0  # weight
-        behead[2] = endian.htobe16(NUMERIC_POS)  # sign
-        behead[3] = endian.htobe16(dscale)
-        memcpy(buf, behead, sizeof(behead))
-        return sizeof(behead)
-
-    # Equivalent of 0-padding on the left to align the py digits to the pg
-    # digits, but without changing the digits tuple.
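    # Worked example (illustrative): Decimal("1.5") has py digits (1, 5)
    # and exponent -1, hence dscale = 1. The padding below sets wi = 3, so
    # the digits pack into pg digits [0001, 5000] with weight 0, which
    # PostgreSQL reads back as 1 + 5000/10000 = 1.5.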
-    cdef int wi = 0
-    cdef int mod = (ndigits - dscale) % DEC_DIGITS
-    if mod < 0:
-        # C's % can return a negative result, unlike Python's
-        mod += 4
-    if mod:
-        wi = DEC_DIGITS - mod
-        ndigits += wi
-
-    cdef int tmp = nzdigits + wi
-    cdef int pgdigits = tmp // DEC_DIGITS + (tmp % DEC_DIGITS and 1)
-    length = sizeof(behead) + pgdigits * sizeof(uint16_t)
-    buf = <uint16_t *>CDumper.ensure_size(rv, offset, length)
-    behead[0] = endian.htobe16(pgdigits)
-    behead[1] = endian.htobe16(((ndigits + exp) // DEC_DIGITS - 1))
-    behead[2] = endian.htobe16(NUMERIC_NEG) if sign else endian.htobe16(NUMERIC_POS)
-    behead[3] = endian.htobe16(dscale)
-    memcpy(buf, behead, sizeof(behead))
-    buf += 4
-
-    cdef uint16_t pgdigit = 0, bedigit
-    for i in range(nzdigits):
-        pgdigit += pydigit_weights[wi] * (digits[i])
-        wi += 1
-        if wi >= DEC_DIGITS:
-            bedigit = endian.htobe16(pgdigit)
-            memcpy(buf, &bedigit, sizeof(bedigit))
-            buf += 1
-            pgdigit = wi = 0
-
-    if pgdigit:
-        bedigit = endian.htobe16(pgdigit)
-        memcpy(buf, &bedigit, sizeof(bedigit))
-
-    return length
-
-
-cdef Py_ssize_t dump_int_to_text(obj, bytearray rv, Py_ssize_t offset) except -1:
-    cdef long long val
-    cdef int overflow
-    cdef char *buf
-    cdef char *src
-    cdef Py_ssize_t length
-
-    val = PyLong_AsLongLongAndOverflow(obj, &overflow)
-    if not overflow:
-        buf = CDumper.ensure_size(rv, offset, MAXINT8LEN + 1)
-        length = pg_lltoa(val, buf)
-    else:
-        b = bytes(str(obj), "utf-8")
-        PyBytes_AsStringAndSize(b, &src, &length)
-        buf = CDumper.ensure_size(rv, offset, length)
-        memcpy(buf, src, length)
-
-    return length
-
-
-cdef Py_ssize_t dump_int_or_sub_to_text(
-    obj, bytearray rv, Py_ssize_t offset
-) except -1:
-    cdef long long val
-    cdef int overflow
-    cdef char *buf
-    cdef char *src
-    cdef Py_ssize_t length
-
-    # Ensure an int or a subclass. The 'is' type check is fast.
-    # Passing a float must give an error, but passing an Enum should work.
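    # Note: int subclasses such as enum.IntEnum members pass the
    # isinstance() check below and dump as their integer value, while a
    # float is rejected rather than silently truncated. A hedged sketch:
    #
    #     import enum
    #     class Flag(enum.IntEnum):
    #         ON = 1
    #     isinstance(Flag.ON, int)  # True  -> dumped as b"1"
    #     isinstance(1.5, int)      # False -> DataError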
-    if type(obj) is not int and not isinstance(obj, int):
-        raise e.DataError(f"integer expected, got {type(obj).__name__!r}")
-
-    val = PyLong_AsLongLongAndOverflow(obj, &overflow)
-    if not overflow:
-        buf = CDumper.ensure_size(rv, offset, MAXINT8LEN + 1)
-        length = pg_lltoa(val, buf)
-    else:
-        b = bytes(str(obj), "utf-8")
-        PyBytes_AsStringAndSize(b, &src, &length)
-        buf = CDumper.ensure_size(rv, offset, length)
-        memcpy(buf, src, length)
-
-    return length
-
-
-cdef Py_ssize_t dump_int_to_int2_binary(
-    obj, bytearray rv, Py_ssize_t offset
-) except -1:
-    cdef int16_t val = PyLong_AsLongLong(obj)
-    cdef int16_t *buf = <int16_t *>CDumper.ensure_size(rv, offset, sizeof(val))
-    cdef uint16_t beval = endian.htobe16(val)  # swap bytes if needed
-    memcpy(buf, &beval, sizeof(beval))
-    return sizeof(val)
-
-
-cdef Py_ssize_t dump_int_to_int4_binary(
-    obj, bytearray rv, Py_ssize_t offset
-) except -1:
-    cdef int32_t val = PyLong_AsLongLong(obj)
-    cdef int32_t *buf = <int32_t *>CDumper.ensure_size(rv, offset, sizeof(val))
-    cdef uint32_t beval = endian.htobe32(val)  # swap bytes if needed
-    memcpy(buf, &beval, sizeof(beval))
-    return sizeof(val)
-
-
-cdef Py_ssize_t dump_int_to_int8_binary(
-    obj, bytearray rv, Py_ssize_t offset
-) except -1:
-    cdef int64_t val = PyLong_AsLongLong(obj)
-    cdef int64_t *buf = <int64_t *>CDumper.ensure_size(rv, offset, sizeof(val))
-    cdef uint64_t beval = endian.htobe64(val)  # swap bytes if needed
-    memcpy(buf, &beval, sizeof(beval))
-    return sizeof(val)
-
-
-cdef Py_ssize_t dump_int_to_numeric_binary(obj, bytearray rv, Py_ssize_t offset) except -1:
-    # Calculate the number of PG digits required to store the number
-    cdef uint16_t ndigits
-    ndigits = ((obj.bit_length()) * BIT_PER_PGDIGIT) + 1
-
-    cdef uint16_t sign = NUMERIC_POS
-    if obj < 0:
-        sign = NUMERIC_NEG
-        obj = -obj
-
-    cdef Py_ssize_t length = sizeof(uint16_t) * (ndigits + 4)
-    cdef uint16_t *buf
-    buf = <uint16_t *>CDumper.ensure_size(rv, offset, length)
-
-    cdef uint16_t behead[4]
-    behead[0] = endian.htobe16(ndigits)
-    behead[1] = endian.htobe16(ndigits - 1)  # weight
-    behead[2] = endian.htobe16(sign)
-    behead[3] = 0  # dscale
-    memcpy(buf, behead, sizeof(behead))
-
-    cdef int i = 4 + ndigits - 1
-    cdef uint16_t rem, berem
-    while obj:
-        rem = obj % 10000
-        obj //= 10000
-        berem = endian.htobe16(rem)
-        memcpy(buf + i, &berem, sizeof(berem))
-        i -= 1
-    while i > 3:
-        memset(buf + i, 0, sizeof(buf[0]))
-        i -= 1
-
-    return length
diff --git a/psycopg_c/psycopg_c/types/numpy.pyx b/psycopg_c/psycopg_c/types/numpy.pyx
deleted file mode 100644
index 9b4a0cbc2..000000000
--- a/psycopg_c/psycopg_c/types/numpy.pyx
+++ /dev/null
@@ -1,71 +0,0 @@
-"""
-Cython adapters for numpy types.
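These dumpers delegate to the plain-int helpers of numeric.pyx, converting
the numpy scalar first where needed; a hedged sketch of the equivalence
(assumes numpy is installed):

    >>> import numpy as np
    >>> int(np.int16(42))  # what dump_int_to_int2_binary() receives
    42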
-""" - -# Copyright (C) 2020 The Psycopg Team - -cimport cython - - -@cython.final -cdef class NPInt16Dumper(_IntDumper): - - oid = oids.INT2_OID - - -@cython.final -cdef class NPInt32Dumper(_IntDumper): - - oid = oids.INT4_OID - - -@cython.final -cdef class NPInt64Dumper(_IntDumper): - - oid = oids.INT8_OID - - -@cython.final -cdef class NPNumericDumper(_IntDumper): - - oid = oids.NUMERIC_OID - - -@cython.final -cdef class NPInt16BinaryDumper(_IntDumper): - - oid = oids.INT2_OID - format = PQ_BINARY - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - return dump_int_to_int2_binary(int(obj), rv, offset) - - -@cython.final -cdef class NPInt32BinaryDumper(_IntDumper): - - oid = oids.INT4_OID - format = PQ_BINARY - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - return dump_int_to_int4_binary(int(obj), rv, offset) - - -@cython.final -cdef class NPInt64BinaryDumper(_IntDumper): - - oid = oids.INT8_OID - format = PQ_BINARY - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - return dump_int_to_int8_binary(int(obj), rv, offset) - - -@cython.final -cdef class NPNumericBinaryDumper(_IntDumper): - - oid = oids.NUMERIC_OID - format = PQ_BINARY - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - return dump_int_to_numeric_binary(int(obj), rv, offset) diff --git a/psycopg_c/psycopg_c/types/numutils.c b/psycopg_c/psycopg_c/types/numutils.c deleted file mode 100644 index 4be7108bb..000000000 --- a/psycopg_c/psycopg_c/types/numutils.c +++ /dev/null @@ -1,243 +0,0 @@ -/* - * Utilities to deal with numbers. - * - * Copyright (C) 2020 The Psycopg Team - * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - */ - -#include -#include - -#include "pg_config.h" - - -/* - * 64-bit integers - */ -#ifdef HAVE_LONG_INT_64 -/* Plain "long int" fits, use it */ - -# ifndef HAVE_INT64 -typedef long int int64; -# endif -# ifndef HAVE_UINT64 -typedef unsigned long int uint64; -# endif -# define INT64CONST(x) (x##L) -# define UINT64CONST(x) (x##UL) -#elif defined(HAVE_LONG_LONG_INT_64) -/* We have working support for "long long int", use that */ - -# ifndef HAVE_INT64 -typedef long long int int64; -# endif -# ifndef HAVE_UINT64 -typedef unsigned long long int uint64; -# endif -# define INT64CONST(x) (x##LL) -# define UINT64CONST(x) (x##ULL) -#else -/* neither HAVE_LONG_INT_64 nor HAVE_LONG_LONG_INT_64 */ -# error must have a working 64-bit integer datatype -#endif - - -#ifndef HAVE__BUILTIN_CLZ -static const uint8_t pg_leftmost_one_pos[256] = { - 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 -}; -#endif - -static const char DIGIT_TABLE[200] = { - '0', '0', '0', '1', '0', 
'2', '0', '3', '0', '4', '0', '5', '0', '6', '0', - '7', '0', '8', '0', '9', '1', '0', '1', '1', '1', '2', '1', '3', '1', '4', - '1', '5', '1', '6', '1', '7', '1', '8', '1', '9', '2', '0', '2', '1', '2', - '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9', - '3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', - '7', '3', '8', '3', '9', '4', '0', '4', '1', '4', '2', '4', '3', '4', '4', - '4', '5', '4', '6', '4', '7', '4', '8', '4', '9', '5', '0', '5', '1', '5', - '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9', - '6', '0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', - '7', '6', '8', '6', '9', '7', '0', '7', '1', '7', '2', '7', '3', '7', '4', - '7', '5', '7', '6', '7', '7', '7', '8', '7', '9', '8', '0', '8', '1', '8', - '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9', - '9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', - '7', '9', '8', '9', '9' -}; - - -/* - * pg_leftmost_one_pos64 - * As above, but for a 64-bit word. - */ -static inline int -pg_leftmost_one_pos64(uint64_t word) -{ -#ifdef HAVE__BUILTIN_CLZ -#if defined(HAVE_LONG_INT_64) - return 63 - __builtin_clzl(word); -#elif defined(HAVE_LONG_LONG_INT_64) - return 63 - __builtin_clzll(word); -#else -#error must have a working 64-bit integer datatype -#endif -#else /* !HAVE__BUILTIN_CLZ */ - int shift = 64 - 8; - - while ((word >> shift) == 0) - shift -= 8; - - return shift + pg_leftmost_one_pos[(word >> shift) & 255]; -#endif /* HAVE__BUILTIN_CLZ */ -} - - -static inline int -decimalLength64(const uint64_t v) -{ - int t; - static const uint64_t PowersOfTen[] = { - UINT64CONST(1), UINT64CONST(10), - UINT64CONST(100), UINT64CONST(1000), - UINT64CONST(10000), UINT64CONST(100000), - UINT64CONST(1000000), UINT64CONST(10000000), - UINT64CONST(100000000), UINT64CONST(1000000000), - UINT64CONST(10000000000), UINT64CONST(100000000000), - UINT64CONST(1000000000000), UINT64CONST(10000000000000), - UINT64CONST(100000000000000), UINT64CONST(1000000000000000), - UINT64CONST(10000000000000000), UINT64CONST(100000000000000000), - UINT64CONST(1000000000000000000), UINT64CONST(10000000000000000000) - }; - - /* - * Compute base-10 logarithm by dividing the base-2 logarithm by a - * good-enough approximation of the base-2 logarithm of 10 - */ - t = (pg_leftmost_one_pos64(v) + 1) * 1233 / 4096; - return t + (v >= PowersOfTen[t]); -} - - -/* - * Get the decimal representation, not NUL-terminated, and return the length of - * same. Caller must ensure that a points to at least MAXINT8LEN bytes. - */ -int -pg_ulltoa_n(uint64_t value, char *a) -{ - int olength, - i = 0; - uint32_t value2; - - /* Degenerate case */ - if (value == 0) - { - *a = '0'; - return 1; - } - - olength = decimalLength64(value); - - /* Compute the result string. 
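 * Note: DIGIT_TABLE emits two decimal digits per memcpy, so each 64-bit
 * division peels off eight digits at a time. E.g. for value2 = 1234 in the
 * ">= 100" branch below: c = (1234 % 100) << 1 = 68, and DIGIT_TABLE[68..69]
 * is "34"; the leading "12" is emitted by the following branch.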
*/ - while (value >= 100000000) - { - const uint64_t q = value / 100000000; - uint32_t value2 = (uint32_t) (value - 100000000 * q); - - const uint32_t c = value2 % 10000; - const uint32_t d = value2 / 10000; - const uint32_t c0 = (c % 100) << 1; - const uint32_t c1 = (c / 100) << 1; - const uint32_t d0 = (d % 100) << 1; - const uint32_t d1 = (d / 100) << 1; - - char *pos = a + olength - i; - - value = q; - - memcpy(pos - 2, DIGIT_TABLE + c0, 2); - memcpy(pos - 4, DIGIT_TABLE + c1, 2); - memcpy(pos - 6, DIGIT_TABLE + d0, 2); - memcpy(pos - 8, DIGIT_TABLE + d1, 2); - i += 8; - } - - /* Switch to 32-bit for speed */ - value2 = (uint32_t) value; - - if (value2 >= 10000) - { - const uint32_t c = value2 - 10000 * (value2 / 10000); - const uint32_t c0 = (c % 100) << 1; - const uint32_t c1 = (c / 100) << 1; - - char *pos = a + olength - i; - - value2 /= 10000; - - memcpy(pos - 2, DIGIT_TABLE + c0, 2); - memcpy(pos - 4, DIGIT_TABLE + c1, 2); - i += 4; - } - if (value2 >= 100) - { - const uint32_t c = (value2 % 100) << 1; - char *pos = a + olength - i; - - value2 /= 100; - - memcpy(pos - 2, DIGIT_TABLE + c, 2); - i += 2; - } - if (value2 >= 10) - { - const uint32_t c = value2 << 1; - char *pos = a + olength - i; - - memcpy(pos - 2, DIGIT_TABLE + c, 2); - } - else - *a = (char) ('0' + value2); - - return olength; -} - -/* - * pg_lltoa: converts a signed 64-bit integer to its string representation and - * returns strlen(a). - * - * Caller must ensure that 'a' points to enough memory to hold the result - * (at least MAXINT8LEN + 1 bytes, counting a leading sign and trailing NUL). - */ -int -pg_lltoa(int64_t value, char *a) -{ - uint64_t uvalue = value; - int len = 0; - - if (value < 0) - { - uvalue = (uint64_t) 0 - uvalue; - a[len++] = '-'; - } - - len += pg_ulltoa_n(uvalue, a + len); - a[len] = '\0'; - return len; -} diff --git a/psycopg_c/psycopg_c/types/string.pyx b/psycopg_c/psycopg_c/types/string.pyx deleted file mode 100644 index 896590ecf..000000000 --- a/psycopg_c/psycopg_c/types/string.pyx +++ /dev/null @@ -1,311 +0,0 @@ -""" -Cython adapters for textual types. 
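The dumpers below special-case a UTF-8 (or ASCII) client_encoding so that
exact `str` objects can be dumped without an intermediate bytes copy; other
encodings go through the generic codec machinery, roughly the pure-Python
equivalent of:

    >>> "héllo".encode("latin-1")
    b'h\xe9llo'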
-""" - -# Copyright (C) 2020 The Psycopg Team - -cimport cython -from libc.string cimport memchr, memcpy -from cpython.bytes cimport PyBytes_AsString, PyBytes_AsStringAndSize -from cpython.unicode cimport PyUnicode_AsEncodedString, PyUnicode_AsUTF8String -from cpython.unicode cimport PyUnicode_CheckExact, PyUnicode_Decode -from cpython.unicode cimport PyUnicode_DecodeUTF8 - -from psycopg_c.pq cimport Escaping, _buffer_as_string_and_size, libpq - -from psycopg import errors as e -from psycopg._encodings import pg2pyenc - - -cdef extern from "Python.h": - const char *PyUnicode_AsUTF8AndSize(unicode obj, Py_ssize_t *size) except NULL - - -cdef class _BaseStrDumper(CDumper): - cdef int is_utf8 - cdef char *encoding - cdef bytes _bytes_encoding # needed to keep `encoding` alive - - def __cinit__(self, cls, context: AdaptContext | None = None): - - self.is_utf8 = 0 - self.encoding = "utf-8" - cdef const char *pgenc - - if self._pgconn is not None: - pgenc = libpq.PQparameterStatus(self._pgconn._pgconn_ptr, b"client_encoding") - if pgenc == NULL or pgenc == b"UTF8": - self._bytes_encoding = b"utf-8" - self.is_utf8 = 1 - else: - self._bytes_encoding = pg2pyenc(pgenc).encode() - if self._bytes_encoding == b"ascii": - self.is_utf8 = 1 - self.encoding = PyBytes_AsString(self._bytes_encoding) - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - # the server will raise DataError subclass if the string contains 0x00 - cdef Py_ssize_t size; - cdef const char *src - - if self.is_utf8: - # Probably the fastest path, but doesn't work with subclasses - if PyUnicode_CheckExact(obj): - src = PyUnicode_AsUTF8AndSize(obj, &size) - else: - b = PyUnicode_AsUTF8String(obj) - PyBytes_AsStringAndSize(b, &src, &size) - else: - b = PyUnicode_AsEncodedString(obj, self.encoding, NULL) - PyBytes_AsStringAndSize(b, &src, &size) - - cdef char *buf = CDumper.ensure_size(rv, offset, size) - memcpy(buf, src, size) - return size - - -cdef class _StrBinaryDumper(_BaseStrDumper): - - format = PQ_BINARY - - -@cython.final -cdef class StrBinaryDumper(_StrBinaryDumper): - - oid = oids.TEXT_OID - - -@cython.final -cdef class StrBinaryDumperVarchar(_StrBinaryDumper): - - oid = oids.VARCHAR_OID - - -@cython.final -cdef class StrBinaryDumperName(_StrBinaryDumper): - - oid = oids.NAME_OID - - -cdef class _StrDumper(_BaseStrDumper): - - format = PQ_TEXT - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - cdef Py_ssize_t size = StrBinaryDumper.cdump(self, obj, rv, offset) - - # Like the binary dump, but check for 0, or the string will be truncated - cdef const char *buf = PyByteArray_AS_STRING(rv) - if NULL != memchr(buf + offset, 0x00, size): - raise e.DataError( - "PostgreSQL text fields cannot contain NUL (0x00) bytes" - ) - return size - - -@cython.final -cdef class StrDumper(_StrDumper): - - oid = oids.TEXT_OID - - -@cython.final -cdef class StrDumperVarchar(_StrDumper): - - oid = oids.VARCHAR_OID - - -@cython.final -cdef class StrDumperName(_StrDumper): - - oid = oids.NAME_OID - - -@cython.final -cdef class StrDumperUnknown(_StrDumper): - pass - - -cdef class _TextLoader(CLoader): - - format = PQ_TEXT - - cdef int is_utf8 - cdef char *encoding - cdef bytes _bytes_encoding # needed to keep `encoding` alive - - def __cinit__(self, oid: int, context: AdaptContext | None = None): - - self.is_utf8 = 0 - self.encoding = "utf-8" - cdef const char *pgenc - - if self._pgconn is not None: - pgenc = libpq.PQparameterStatus(self._pgconn._pgconn_ptr, b"client_encoding") - if pgenc == 
NULL or pgenc == b"UTF8": - self._bytes_encoding = b"utf-8" - self.is_utf8 = 1 - else: - self._bytes_encoding = pg2pyenc(pgenc).encode() - - if pgenc == b"SQL_ASCII": - self.encoding = NULL - else: - self.encoding = PyBytes_AsString(self._bytes_encoding) - - cdef object cload(self, const char *data, size_t length): - if self.is_utf8: - return PyUnicode_DecodeUTF8(data, length, NULL) - elif self.encoding: - return PyUnicode_Decode(data, length, self.encoding, NULL) - else: - return data[:length] - -@cython.final -cdef class TextLoader(_TextLoader): - - format = PQ_TEXT - - -@cython.final -cdef class TextBinaryLoader(_TextLoader): - - format = PQ_BINARY - - -@cython.final -cdef class BytesDumper(CDumper): - - format = PQ_TEXT - oid = oids.BYTEA_OID - - # 0: not set, 1: just single "'" quote, 3: " E'" quote - cdef int _qplen - - def __cinit__(self): - self._qplen = 0 - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - - cdef size_t len_out - cdef unsigned char *out - cdef char *ptr - cdef Py_ssize_t length - - _buffer_as_string_and_size(obj, &ptr, &length) - - if self._pgconn is not None and self._pgconn._pgconn_ptr != NULL: - out = libpq.PQescapeByteaConn( - self._pgconn._pgconn_ptr, ptr, length, &len_out) - else: - out = libpq.PQescapeBytea(ptr, length, &len_out) - - if out is NULL: - raise MemoryError( - f"couldn't allocate for escape_bytea of {length} bytes" - ) - - len_out -= 1 # out includes final 0 - cdef char *buf = CDumper.ensure_size(rv, offset, len_out) - memcpy(buf, out, len_out) - libpq.PQfreemem(out) - return len_out - - def quote(self, obj) -> Buffer: - cdef size_t len_out - cdef unsigned char *out - cdef char *ptr - cdef Py_ssize_t length - cdef const char *scs - - escaped = self.dump(obj) - _buffer_as_string_and_size(escaped, &ptr, &length) - - rv = PyByteArray_FromStringAndSize("", 0) - - # We cannot use the base quoting because escape_bytea already returns - # the quotes content. if scs is off it will escape the backslashes in - # the format, otherwise it won't, but it doesn't tell us what quotes to - # use. - if self._pgconn is not None: - if not self._qplen: - scs = libpq.PQparameterStatus(self._pgconn._pgconn_ptr, - b"standard_conforming_strings") - if scs and scs[0] == b'o' and scs[1] == b"n": # == "on" - self._qplen = 1 - else: - self._qplen = 3 - - PyByteArray_Resize(rv, length + self._qplen + 1) # Include quotes - ptr_out = PyByteArray_AS_STRING(rv) - if self._qplen == 1: - ptr_out[0] = b"'" - else: - ptr_out[0] = b" " - ptr_out[1] = b"E" - ptr_out[2] = b"'" - memcpy(ptr_out + self._qplen, ptr, length) - ptr_out[length + self._qplen] = b"'" - return rv - - # We don't have a connection, so someone is using us to generate a file - # to use off-line or something like that. PQescapeBytea, like its - # string counterpart, is not predictable whether it will escape - # backslashes. 
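        # Note (hedged): without a connection the quoted form is always
        # " E'...'"; PQescapeBytea may render b"\x01" as \\001 (escape
        # format) or as hex, depending on the libpq version. The probe on
        # b"\x00" below detects whether backslashes came out single and, if
        # so, doubles them so they survive inside the E'' literal.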
- PyByteArray_Resize(rv, length + 4) # Include quotes - ptr_out = PyByteArray_AS_STRING(rv) - ptr_out[0] = b" " - ptr_out[1] = b"E" - ptr_out[2] = b"'" - memcpy(ptr_out + 3, ptr, length) - ptr_out[length + 3] = b"'" - - esc = Escaping() - if esc.escape_bytea(b"\x00") == b"\\000": - rv = bytes(rv).replace(b"\\", b"\\\\") - - return rv - - -@cython.final -cdef class BytesBinaryDumper(CDumper): - - format = PQ_BINARY - oid = oids.BYTEA_OID - - cdef Py_ssize_t cdump(self, obj, bytearray rv, Py_ssize_t offset) except -1: - cdef char *src - cdef Py_ssize_t size; - _buffer_as_string_and_size(obj, &src, &size) - - cdef char *buf = CDumper.ensure_size(rv, offset, size) - memcpy(buf, src, size) - return size - - -@cython.final -cdef class ByteaLoader(CLoader): - - format = PQ_TEXT - - cdef object cload(self, const char *data, size_t length): - cdef size_t len_out - cdef unsigned char *out = libpq.PQunescapeBytea( - data, &len_out) - if out is NULL: - raise MemoryError( - f"couldn't allocate for unescape_bytea of {len(data)} bytes" - ) - - rv = out[:len_out] - libpq.PQfreemem(out) - return rv - - -@cython.final -cdef class ByteaBinaryLoader(CLoader): - - format = PQ_BINARY - - cdef object cload(self, const char *data, size_t length): - return data[:length] diff --git a/psycopg_c/psycopg_c/types/uuid.pyx b/psycopg_c/psycopg_c/types/uuid.pyx deleted file mode 100644 index 164be69c3..000000000 --- a/psycopg_c/psycopg_c/types/uuid.pyx +++ /dev/null @@ -1,97 +0,0 @@ -cimport cython -from cpython.long cimport PyLong_FromUnsignedLongLong - - -cdef extern from *: - """ -static const int8_t hex_to_int_map[] = { - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 0-15 - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 16-31 - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 32-47 - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, // 48-63 ('0'-'9') - -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 64-79 ('A'-'F') - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 80-95 - -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 96-111 ('a'-'f') - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 112-127 - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 128-143 - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 144-159 - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 160-175 - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 176-191 - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 192-207 - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 208-223 - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 224-239 - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 // 240-255 -}; -""" - const int8_t[256] hex_to_int_map - - -cdef class _UUIDLoader(CLoader): - - cdef object _object_new - cdef object _uuid_type - cdef PyObject *_wuuid_type - cdef object _safeuuid_unknown - - def __cinit__(self, oid: int, context: AdaptContext | None = None): - from psycopg_c import _uuid - - self._object_new = object.__new__ - self._uuid_type = _uuid.UUID - self._wuuid_type = _uuid._WritableUUID - self._safeuuid_unknown = _uuid.SafeUUID_unknown - - cdef object _return_uuid(self, uint64_t low, uint64_t high): - cdef object py_low = PyLong_FromUnsignedLongLong(low) - cdef object py_high = PyLong_FromUnsignedLongLong(high) - cdef object py_value = (py_high << 64) | py_low - - cdef 
object u = PyObject_CallFunctionObjArgs( - self._object_new, self._wuuid_type, NULL) - u.int = py_value - u.is_safe = self._safeuuid_unknown - u.__class__ = self._uuid_type - return u - - -@cython.final -cdef class UUIDLoader(_UUIDLoader): - format = PQ_TEXT - - cdef object cload(self, const char *data, size_t length): - cdef uint64_t high = 0 - cdef uint64_t low = 0 - cdef size_t i - cdef int ndigits = 0 - cdef int8_t c - - for i in range(length): - c = data[i] - if hex_to_int_map[c] == -1: - continue - - if ndigits < 16: - high = (high << 4) | hex_to_int_map[c] - else: - low = (low << 4) | hex_to_int_map[c] - ndigits += 1 - - if ndigits != 32: - raise ValueError("Invalid UUID string") - - return self._return_uuid(low, high) - - -@cython.final -cdef class UUIDBinaryLoader(_UUIDLoader): - format = PQ_BINARY - - cdef object cload(self, const char *data, size_t length): - cdef uint64_t be[2] - if length != sizeof(be): - raise ValueError("Invalid UUID data") - memcpy(&be, data, sizeof(be)) - - cdef uint64_t high = endian.be64toh(be[0]) - cdef uint64_t low = endian.be64toh(be[1]) - return self._return_uuid(low, high) diff --git a/psycopg_c/psycopg_c/version.py b/psycopg_c/psycopg_c/version.py deleted file mode 100644 index 47bdfbce3..000000000 --- a/psycopg_c/psycopg_c/version.py +++ /dev/null @@ -1,12 +0,0 @@ -""" -psycopg-c distribution version file. -""" - -# Copyright (C) 2020 The Psycopg Team - -from importlib import metadata - -try: - __version__ = metadata.version("psycopg-c") -except metadata.PackageNotFoundError: - __version__ = "0.0.0.0" diff --git a/psycopg_c/pyproject.toml b/psycopg_c/pyproject.toml deleted file mode 100644 index da8c48866..000000000 --- a/psycopg_c/pyproject.toml +++ /dev/null @@ -1,107 +0,0 @@ -[build-system] -requires = [ - # Note: pinning these versions strictly because of the setuptools warning: - # - # `[tool.setuptools.ext-modules]` in `pyproject.toml` is still - # *experimental* and likely to change in future releases - # - "setuptools == 75.6.0; python_version >= '3.9'", - "setuptools == 75.3.0; python_version < '3.9'", # last supported version - "wheel >= 0.37", - "tomli >= 2.0.1; python_version < '3.11'", -] - -# The cython_backend is a build backend adding a Cython dependency if the c -# source must be build from pxd files (when building from git checkout), and -# doesn't require Cython when needing to build from c files (when building -# from the sdist bundle). -build-backend = "cython_backend" -backend-path = ["build_backend"] - -[cython-backend] -# These packages are only installed if there are pyx files to compile. 
-cython-requires = ["Cython >= 3.0.0"] - -[project] -name = "psycopg-c" -description = "PostgreSQL database adapter for Python -- C optimisation distribution" -version = "3.3.0.dev1" -classifiers = [ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Developers", - "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)", - "Operating System :: MacOS :: MacOS X", - "Operating System :: Microsoft :: Windows", - "Operating System :: POSIX", - "Programming Language :: Cython", - "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3.13", - "Programming Language :: Python :: Implementation :: CPython", - "Topic :: Database", - "Topic :: Database :: Front-Ends", - "Topic :: Software Development", - "Topic :: Software Development :: Libraries :: Python Modules", -] -requires-python = ">= 3.9" - -[[project.authors]] -name = "Daniele Varrazzo" -email = "daniele.varrazzo@gmail.com" - -[project.license] -text = "GNU Lesser General Public License v3 (LGPLv3)" - -[project.urls] -Homepage = "https://psycopg.org/" -Documentation = "https://psycopg.org/psycopg3/docs/" -Changes = "https://psycopg.org/psycopg3/docs/news.html" -Code = "https://github.com/psycopg/psycopg" -"Issue Tracker" = "https://github.com/psycopg/psycopg/issues" - -[project.readme] -file = "README.rst" -content-type = "text/x-rst" - -[tool.setuptools] -packages = [ - "psycopg_c", - "psycopg_c.pq", - "psycopg_c._psycopg", - "psycopg_c.types", -] -zip-safe = false -license-files = ["LICENSE.txt"] -include-package-data = true - -[tool.setuptools.package-data] -# NOTE: do not include .pyx files: they shouldn't be in the sdist -# package, so that build is only performed from the .c files (which are -# distributed instead). -psycopg_c = [ - "py.typed", - "*.pyi", - "*.pxd", - "_psycopg/*.pxd", - "pq/*.pxd", -] -psycopg_binary = [ - "py.typed", - "*.pyi", -] - -# Note: these ext modules lack details such as libraries and directories. -# They are added by the 'psycopg_build_ext' build module. -[[tool.setuptools.ext-modules]] -name = "psycopg_c._psycopg" -sources = ["psycopg_c/_psycopg.c", "psycopg_c/types/numutils.c"] - -[[tool.setuptools.ext-modules]] -name = "psycopg_c.pq" -sources = ["psycopg_c/pq.c"] - -[tool.setuptools.cmdclass] -build_ext = "psycopg_build_ext.psycopg_build_ext" diff --git a/psycopg_pool/.flake8 b/psycopg_pool/.flake8 deleted file mode 100644 index 67466af4d..000000000 --- a/psycopg_pool/.flake8 +++ /dev/null @@ -1,7 +0,0 @@ -[flake8] -max-line-length = 88 -ignore = W503, E203, E704 - -per-file-ignores = - # Allow concatenated string literals from async_to_sync - psycopg_pool/pool.py: E501 diff --git a/psycopg_pool/LICENSE.txt b/psycopg_pool/LICENSE.txt deleted file mode 100644 index 0a041280b..000000000 --- a/psycopg_pool/LICENSE.txt +++ /dev/null @@ -1,165 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. 
- - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. 
- - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. 
- - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/psycopg_pool/README.rst b/psycopg_pool/README.rst deleted file mode 100644 index 031173039..000000000 --- a/psycopg_pool/README.rst +++ /dev/null @@ -1,25 +0,0 @@ -Psycopg 3: PostgreSQL database adapter for Python - Connection Pool -=================================================================== - -This distribution package is an optional component of `Psycopg 3`__: it -contains the optional connection pool package `psycopg_pool`__. - -.. __: https://pypi.org/project/psycopg/ -.. __: https://www.psycopg.org/psycopg3/docs/advanced/pool.html - -This package is kept separate from the main ``psycopg`` package because it is -likely that it will follow a different release cycle. - -You can also install this package using:: - - pip install "psycopg[pool]" - -Please read `the project readme`__ and `the installation documentation`__ for -more details. - -.. __: https://github.com/psycopg/psycopg#readme -.. __: https://www.psycopg.org/psycopg3/docs/basic/install.html - #installing-the-connection-pool - - -Copyright (C) 2020 The Psycopg Team diff --git a/psycopg_pool/psycopg_pool/py.typed b/psycopg_pool/psycopg_pool/py.typed deleted file mode 100644 index e69de29bb..000000000 diff --git a/psycopg_pool/psycopg_pool/version.py b/psycopg_pool/psycopg_pool/version.py deleted file mode 100644 index 765e03605..000000000 --- a/psycopg_pool/psycopg_pool/version.py +++ /dev/null @@ -1,12 +0,0 @@ -""" -psycopg pool version file. -""" - -# Copyright (C) 2021 The Psycopg Team - -from importlib import metadata - -try: - __version__ = metadata.version("psycopg-pool") -except metadata.PackageNotFoundError: - __version__ = "0.0.0.0" diff --git a/pyproject.toml b/pyproject.toml index f3fa58e5d..d643c58d6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,8 +16,8 @@ testpaths=[ [tool.coverage.run] source = [ - "psycopg/psycopg", - "psycopg_pool/psycopg_pool", + "gaussdb/gaussdb", + "gaussdb_pool/gaussdb_pool", ] [tool.coverage.report] exclude_lines = [ @@ -27,9 +27,9 @@ exclude_lines = [ [tool.mypy] files = [ - "psycopg/psycopg", - "psycopg_pool/psycopg_pool", - "psycopg_c/psycopg_c", + "gaussdb/gaussdb", + "gaussdb_pool/gaussdb_pool", + "gaussdb_c/gaussdb_c", "tests", ] warn_unused_ignores = true @@ -57,10 +57,10 @@ disallow_untyped_calls = false [tool.codespell] ignore-words-list = "alot,ans,ba,fo,te,erro,varning" -skip = "build,_build,.tox,.mypy_cache,.venv,pq.c,_psycopg.c,*.html" +skip = "build,_build,.tox,.mypy_cache,.venv,pq.c,_gaussdb.c,*.html" [tool.isort] profile = "black" length_sort = true multi_line_output = 9 -sort_order = "psycopg" # requires the isort-psycopg module +sort_order = "gaussdb" # requires the isort-gaussdb module diff --git a/tests/README.rst b/tests/README.rst index 9c1dc90d4..cc6bac28e 100644 --- a/tests/README.rst +++ b/tests/README.rst @@ -1,22 +1,22 @@ -psycopg test suite +gaussdb test suite =================== Quick version ------------- To run tests on the current code you can install the ``test`` extra of the -package, specify a connection string in the ``PSYCOPG_TEST_DSN`` env var to +package, specify a connection string in the ``GAUSSDB_TEST_DSN`` env var to connect to a test database, and run ``pytest``:: - $ pip install -e "psycopg[test]" - $ export 
PSYCOPG_TEST_DSN="host=localhost dbname=psycopg_test" + $ pip install -e "gaussdb[test]" + $ export GAUSSDB_TEST_DSN="host=localhost dbname=gaussdb_test" $ pytest Test options ------------ -- The tests output header shows additional psycopg related information, +- The tests output header shows additional gaussdb related information, on top of the one normally displayed by ``pytest`` and the extensions used:: $ pytest @@ -27,18 +27,18 @@ Test options libpq wrapper implementation: c -- By default the tests run using the ``pq`` implementation that psycopg would +- By default the tests run using the ``pq`` implementation that gaussdb would choose (the C module if installed, else the Python one). In order to test a different implementation, use the normal `pq module selection mechanism`__ - of the ``PSYCOPG_IMPL`` env var:: + of the ``GAUSSDB_IMPL`` env var:: - $ PSYCOPG_IMPL=python pytest + $ GAUSSDB_IMPL=python pytest ========================= test session starts ========================= [...] libpq available: 130002 libpq wrapper implementation: python - .. __: https://www.psycopg.org/psycopg3/docs/api/pq.html#pq-module-implementations + .. __: https://www.gaussdb.org/gaussdb/docs/api/pq.html#pq-module-implementations - Slow tests have a ``slow`` marker which can be selected to reduce test @@ -79,10 +79,10 @@ test dsn in order to connect to a database running on the docker host: specify a set of env vars working for your setup:: $ docker run -ti --rm --volume `pwd`:/src --workdir /src \ - -e PSYCOPG_TEST_DSN -e PGHOST=172.17.0.1 -e PGUSER=`whoami` \ + -e GAUSSDB_TEST_DSN -e PGHOST=172.17.0.1 -e PGUSER=`whoami` \ python:3.9 bash - # pip install -e "./psycopg[test]" ./psycopg_pool ./psycopg_c + # pip install -e "./gaussdb[test]" ./gaussdb_pool ./gaussdb_c # pytest @@ -96,5 +96,5 @@ You can run CRDB in a docker container using:: And use the following connection string to run the tests:: - export PSYCOPG_TEST_DSN="host=localhost port=26257 user=root dbname=defaultdb" + export GAUSSDB_TEST_DSN="host=localhost port=26257 user=root dbname=defaultdb" pytest ... diff --git a/tests/_test_connection.py b/tests/_test_connection.py index 57257a5bc..b0177a5d1 100644 --- a/tests/_test_connection.py +++ b/tests/_test_connection.py @@ -9,15 +9,15 @@ import pytest -import psycopg -from psycopg.conninfo import conninfo_to_dict +import gaussdb +from gaussdb.conninfo import conninfo_to_dict try: - from psycopg.conninfo import _DEFAULT_CONNECT_TIMEOUT as DEFAULT_TIMEOUT + from gaussdb.conninfo import _DEFAULT_CONNECT_TIMEOUT as DEFAULT_TIMEOUT except ImportError: - # Allow tests to import (not necessarily to pass all) if the psycopg module - # imported is not the one expected (e.g. running psycopg pool tests on the - # master branch with psycopg 3.1.x imported). + # Allow tests to import (not necessarily to pass all) if the gaussdb module + # imported is not the one expected (e.g. running gaussdb pool tests on the + # master branch with gaussdb 3.1.x imported). 
DEFAULT_TIMEOUT = 130 @@ -39,7 +39,7 @@ class ParamDef: param_isolation = ParamDef( name="isolation_level", guc="isolation", - values=list(psycopg.IsolationLevel), + values=list(gaussdb.IsolationLevel), non_default="repeatable read", ) param_read_only = ParamDef( @@ -57,7 +57,7 @@ class ParamDef: # Map Python values to Postgres values for the tx_params possible values tx_values_map = { - v.name.lower().replace("_", " "): v.value for v in psycopg.IsolationLevel + v.name.lower().replace("_", " "): v.value for v in gaussdb.IsolationLevel } tx_values_map["on"] = True tx_values_map["off"] = False diff --git a/tests/_test_copy.py b/tests/_test_copy.py index 1ae425e7c..e24e0eabb 100644 --- a/tests/_test_copy.py +++ b/tests/_test_copy.py @@ -1,8 +1,8 @@ import struct -from psycopg import pq -from psycopg.copy import AsyncWriter -from psycopg.copy import FileWriter as FileWriter # noqa: F401 +from gaussdb import pq +from gaussdb.copy import AsyncWriter +from gaussdb.copy import FileWriter as FileWriter # noqa: F401 sample_records = [(40010, 40020, "hello"), (40040, None, "world")] sample_values = "values (40010::int, 40020::int, 'hello'::text), (40040, NULL, 'world')" diff --git a/tests/_test_cursor.py b/tests/_test_cursor.py index ee4b4acae..5fda19adc 100644 --- a/tests/_test_cursor.py +++ b/tests/_test_cursor.py @@ -9,8 +9,8 @@ import pytest -import psycopg -from psycopg.rows import RowMaker +import gaussdb +from gaussdb.rows import RowMaker @pytest.fixture(scope="session") @@ -32,7 +32,7 @@ def execmany(svcconn, _execmany): def ph(cur: Any, query: str) -> str: """Change placeholders in a query from %s to $n if testing a raw cursor""" - from psycopg.raw_cursor import RawCursorMixin + from gaussdb.raw_cursor import RawCursorMixin if not isinstance(cur, RawCursorMixin): return query @@ -52,7 +52,7 @@ def s(m: re.Match[str]) -> str: def my_row_factory( - cursor: psycopg.Cursor[list[str]] | psycopg.AsyncCursor[list[str]], + cursor: gaussdb.Cursor[list[str]] | gaussdb.AsyncCursor[list[str]], ) -> RowMaker[list[str]]: if cursor.description is not None: titles = [c.name for c in cursor.description] @@ -62,4 +62,4 @@ def mkrow(values): return mkrow else: - return psycopg.rows.no_result + return gaussdb.rows.no_result diff --git a/tests/_test_transaction.py b/tests/_test_transaction.py index 688dfec4e..c1262543f 100644 --- a/tests/_test_transaction.py +++ b/tests/_test_transaction.py @@ -2,8 +2,8 @@ import pytest -import psycopg -from psycopg import pq +import gaussdb +from gaussdb import pq # TODOCRDB: is this the expected behaviour? 
crdb_skip_external_observer = pytest.mark.crdb( @@ -23,7 +23,7 @@ def create_test_table(svcconn): def insert_row(conn, value): sql = "INSERT INTO test_table VALUES (%s)" - if isinstance(conn, psycopg.Connection): + if isinstance(conn, gaussdb.Connection): conn.cursor().execute(sql, (value,)) else: @@ -37,7 +37,7 @@ async def f(): def inserted(conn): """Return the values inserted in the test table.""" sql = "SELECT * FROM test_table" - if isinstance(conn, psycopg.Connection): + if isinstance(conn, gaussdb.Connection): rows = conn.cursor().execute(sql).fetchall() return {v for (v,) in rows} else: diff --git a/tests/adapters_example.py b/tests/adapters_example.py index 4d111c890..f9a0a8887 100644 --- a/tests/adapters_example.py +++ b/tests/adapters_example.py @@ -1,7 +1,7 @@ from __future__ import annotations -from psycopg import pq -from psycopg.abc import AdaptContext, Buffer, Dumper, Loader, PyFormat +from gaussdb import pq +from gaussdb.abc import AdaptContext, Buffer, Dumper, Loader, PyFormat def f() -> None: diff --git a/tests/conftest.py b/tests/conftest.py index 61e9f6320..c33e0e1ad 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -9,7 +9,7 @@ import pytest -from psycopg import pq +from gaussdb import pq pytest_plugins = ( "tests.fix_db", @@ -18,7 +18,7 @@ "tests.fix_mypy", "tests.fix_faker", "tests.fix_proxy", - "tests.fix_psycopg", + "tests.fix_gaussdb", "tests.fix_crdb", "tests.fix_gc", "tests.pool.fix_pool", @@ -31,7 +31,7 @@ def pytest_configure(config): "flakey(reason): this test may fail unpredictably')", # There are troubles on travis with these kind of tests and I cannot # catch the exception for my life. - "subprocess: the test import psycopg after subprocess", + "subprocess: the test imports gaussdb after subprocess", "timing: the test is timing based and can fail on cheese hardware", "gevent: the test requires the gevent module to be installed", "dns: the test requires dnspython to run", @@ -113,7 +113,7 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config): def get_database_type(): - dsn = os.getenv("DSN") or os.getenv("PSYCOPG_TEST_DSN") + dsn = os.getenv("DSN") or os.getenv("GAUSSDB_TEST_DSN") if not dsn: print("DSN environment variable not set") return "" diff --git a/tests/crdb/test_adapt.py b/tests/crdb/test_adapt.py index 2a758fce1..457a58fd1 100644 --- a/tests/crdb/test_adapt.py +++ b/tests/crdb/test_adapt.py @@ -2,10 +2,10 @@ import pytest -from psycopg.crdb import CrdbConnection, adapters -from psycopg.adapt import PyFormat, Transformer -from psycopg.postgres import types as builtins -from psycopg.types.array import ListDumper +from gaussdb.crdb import CrdbConnection, adapters +from gaussdb.adapt import PyFormat, Transformer +from gaussdb.postgres import types as builtins +from gaussdb.types.array import ListDumper from ..test_adapt import MyStr, make_bin_dumper, make_bin_loader, make_dumper from ..test_adapt import make_loader diff --git a/tests/crdb/test_connection.py b/tests/crdb/test_connection.py index b935e8530..5de70a70f 100644 --- a/tests/crdb/test_connection.py +++ b/tests/crdb/test_connection.py @@ -5,9 +5,9 @@ import pytest -import psycopg.crdb -from psycopg import errors as e -from psycopg.crdb import CrdbConnection +import gaussdb.crdb +from gaussdb import errors as e +from gaussdb.crdb import CrdbConnection from ..acompat import gather, sleep, spawn @@ -23,7 +23,7 @@ def test_connect(dsn): with CrdbConnection.connect(dsn) as conn: assert isinstance(conn, CrdbConnection) - with psycopg.crdb.connect(dsn) as conn: + with
gaussdb.crdb.connect(dsn) as conn: assert isinstance(conn, CrdbConnection) @@ -50,7 +50,7 @@ def test_broken_connection(conn): cur = conn.cursor() cur.execute("select session_id from [show session_id]") (session_id,) = cur.fetchone() - with pytest.raises(psycopg.DatabaseError): + with pytest.raises(gaussdb.DatabaseError): cur.execute("cancel session %s", [session_id]) assert conn.closed @@ -59,7 +59,7 @@ def test_broken_connection(conn): def test_broken(conn): cur = conn.execute("show session_id") (session_id,) = cur.fetchone() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): conn.execute("cancel session %s", [session_id]) assert conn.closed @@ -84,7 +84,7 @@ def closer(): t = spawn(closer) t0 = time.time() try: - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): conn.execute("select pg_sleep(3.0)") dt = time.time() - t0 # CRDB seems to take not less than 1s diff --git a/tests/crdb/test_connection_async.py b/tests/crdb/test_connection_async.py index f965ccc5b..e68f8cefa 100644 --- a/tests/crdb/test_connection_async.py +++ b/tests/crdb/test_connection_async.py @@ -2,9 +2,9 @@ import pytest -import psycopg.crdb -from psycopg import errors as e -from psycopg.crdb import AsyncCrdbConnection +import gaussdb.crdb +from gaussdb import errors as e +from gaussdb.crdb import AsyncCrdbConnection from ..acompat import asleep, gather, spawn @@ -23,7 +23,7 @@ async def test_connect(dsn): assert isinstance(conn, AsyncCrdbConnection) if False: # ASYNC - with psycopg.crdb.connect(dsn) as conn: + with gaussdb.crdb.connect(dsn) as conn: assert isinstance(conn, AsyncCrdbConnection) @@ -50,7 +50,7 @@ async def test_broken_connection(aconn): cur = aconn.cursor() await cur.execute("select session_id from [show session_id]") (session_id,) = await cur.fetchone() - with pytest.raises(psycopg.DatabaseError): + with pytest.raises(gaussdb.DatabaseError): await cur.execute("cancel session %s", [session_id]) assert aconn.closed @@ -59,7 +59,7 @@ async def test_broken_connection(aconn): async def test_broken(aconn): cur = await aconn.execute("show session_id") (session_id,) = await cur.fetchone() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): await aconn.execute("cancel session %s", [session_id]) assert aconn.closed @@ -84,7 +84,7 @@ async def closer(): t = spawn(closer) t0 = time.time() try: - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): await conn.execute("select pg_sleep(3.0)") dt = time.time() - t0 # CRDB seems to take not less than 1s diff --git a/tests/crdb/test_copy.py b/tests/crdb/test_copy.py index aa2d1024b..0d2d52496 100644 --- a/tests/crdb/test_copy.py +++ b/tests/crdb/test_copy.py @@ -7,10 +7,10 @@ import pytest -from psycopg import errors as e -from psycopg import pq, sql -from psycopg.adapt import PyFormat -from psycopg.types.numeric import Int4 +from gaussdb import errors as e +from gaussdb import pq, sql +from gaussdb.adapt import PyFormat +from gaussdb.types.numeric import Int4 from ..utils import eur from .._test_copy import sample_binary # noqa diff --git a/tests/crdb/test_copy_async.py b/tests/crdb/test_copy_async.py index 60c52ab52..2db38231c 100644 --- a/tests/crdb/test_copy_async.py +++ b/tests/crdb/test_copy_async.py @@ -4,10 +4,10 @@ import pytest -from psycopg import errors as e -from psycopg import pq, sql -from psycopg.adapt import PyFormat -from psycopg.types.numeric import Int4 +from gaussdb import errors 
as e +from gaussdb import pq, sql +from gaussdb.adapt import PyFormat +from gaussdb.types.numeric import Int4 from ..utils import eur from .._test_copy import sample_binary # noqa diff --git a/tests/crdb/test_cursor.py b/tests/crdb/test_cursor.py index b41de3023..4dc03b854 100644 --- a/tests/crdb/test_cursor.py +++ b/tests/crdb/test_cursor.py @@ -8,9 +8,9 @@ import pytest -from psycopg import errors as e -from psycopg import pq -from psycopg.rows import namedtuple_row +from gaussdb import errors as e +from gaussdb import pq +from gaussdb.rows import namedtuple_row from ..acompat import Queue, gather, spawn diff --git a/tests/crdb/test_cursor_async.py b/tests/crdb/test_cursor_async.py index 4c803fa87..3b35ca396 100644 --- a/tests/crdb/test_cursor_async.py +++ b/tests/crdb/test_cursor_async.py @@ -5,9 +5,9 @@ import pytest -from psycopg import errors as e -from psycopg import pq -from psycopg.rows import namedtuple_row +from gaussdb import errors as e +from gaussdb import pq +from gaussdb.rows import namedtuple_row from ..acompat import AQueue, gather, spawn diff --git a/tests/crdb/test_no_crdb.py b/tests/crdb/test_no_crdb.py index 622c6409c..5b3b81e33 100644 --- a/tests/crdb/test_no_crdb.py +++ b/tests/crdb/test_no_crdb.py @@ -1,7 +1,7 @@ import pytest -from psycopg.pq import TransactionStatus -from psycopg.crdb import CrdbConnection +from gaussdb.pq import TransactionStatus +from gaussdb.crdb import CrdbConnection pytestmark = pytest.mark.crdb("skip") diff --git a/tests/crdb/test_typing.py b/tests/crdb/test_typing.py index 2cff0a735..e36049c4e 100644 --- a/tests/crdb/test_typing.py +++ b/tests/crdb/test_typing.py @@ -7,32 +7,32 @@ "conn, type", [ ( - "psycopg.crdb.connect()", - "psycopg.crdb.CrdbConnection[Tuple[Any, ...]]", + "gaussdb.crdb.connect()", + "gaussdb.crdb.CrdbConnection[Tuple[Any, ...]]", ), ( - "psycopg.crdb.connect(row_factory=rows.dict_row)", - "psycopg.crdb.CrdbConnection[Dict[str, Any]]", + "gaussdb.crdb.connect(row_factory=rows.dict_row)", + "gaussdb.crdb.CrdbConnection[Dict[str, Any]]", ), ( - "psycopg.crdb.CrdbConnection.connect()", - "psycopg.crdb.CrdbConnection[Tuple[Any, ...]]", + "gaussdb.crdb.CrdbConnection.connect()", + "gaussdb.crdb.CrdbConnection[Tuple[Any, ...]]", ), ( - "psycopg.crdb.CrdbConnection.connect(row_factory=rows.tuple_row)", - "psycopg.crdb.CrdbConnection[Tuple[Any, ...]]", + "gaussdb.crdb.CrdbConnection.connect(row_factory=rows.tuple_row)", + "gaussdb.crdb.CrdbConnection[Tuple[Any, ...]]", ), ( - "psycopg.crdb.CrdbConnection.connect(row_factory=rows.dict_row)", - "psycopg.crdb.CrdbConnection[Dict[str, Any]]", + "gaussdb.crdb.CrdbConnection.connect(row_factory=rows.dict_row)", + "gaussdb.crdb.CrdbConnection[Dict[str, Any]]", ), ( - "await psycopg.crdb.AsyncCrdbConnection.connect()", - "psycopg.crdb.AsyncCrdbConnection[Tuple[Any, ...]]", + "await gaussdb.crdb.AsyncCrdbConnection.connect()", + "gaussdb.crdb.AsyncCrdbConnection[Tuple[Any, ...]]", ), ( - "await psycopg.crdb.AsyncCrdbConnection.connect(row_factory=rows.dict_row)", - "psycopg.crdb.AsyncCrdbConnection[Dict[str, Any]]", + "await gaussdb.crdb.AsyncCrdbConnection.connect(row_factory=rows.dict_row)", + "gaussdb.crdb.AsyncCrdbConnection[Dict[str, Any]]", ), ], ) @@ -43,7 +43,7 @@ def test_connection_type(conn, type, mypy): def _test_reveal_crdb(stmts, type, mypy): stmts = f"""\ -import psycopg.crdb +import gaussdb.crdb {stmts} """ _test_reveal(stmts, type, mypy) diff --git a/tests/fix_crdb.py b/tests/fix_crdb.py index f1679f6f0..bacd8812f 100644 --- a/tests/fix_crdb.py +++ b/tests/fix_crdb.py 
@@ -2,7 +2,7 @@ import pytest -from psycopg.crdb import CrdbConnection +from gaussdb.crdb import CrdbConnection from .utils import VersionCheck diff --git a/tests/fix_db.py b/tests/fix_db.py index 41bb3ae35..c99b2a0fd 100644 --- a/tests/fix_db.py +++ b/tests/fix_db.py @@ -9,10 +9,10 @@ import pytest -import psycopg -from psycopg import pq, sql -from psycopg.conninfo import conninfo_to_dict, make_conninfo -from psycopg.pq._debug import PGconnDebug +import gaussdb +from gaussdb import pq, sql +from gaussdb.conninfo import conninfo_to_dict, make_conninfo +from gaussdb.pq._debug import PGconnDebug from .utils import check_postgres_version @@ -25,10 +25,10 @@ def pytest_addoption(parser): parser.addoption( "--test-dsn", metavar="DSN", - default=os.environ.get("PSYCOPG_TEST_DSN"), + default=os.environ.get("GAUSSDB_TEST_DSN"), help=( "Connection string to run database tests requiring a connection" - " [you can also use the PSYCOPG_TEST_DSN env var]." + " [you can also use the GAUSSDB_TEST_DSN env var]." ), ) parser.addoption( @@ -41,7 +41,7 @@ def pytest_addoption(parser): "--pq-debug", action="store_true", default=False, - help="Log PGconn access. (Requires PSYCOPG_IMPL=python.)", + help="Log PGconn access. (Requires GAUSSDB_IMPL=python.)", ) @@ -51,7 +51,7 @@ def pytest_report_header(config): return [] try: - with psycopg.connect(dsn, connect_timeout=10) as conn: + with gaussdb.connect(dsn, connect_timeout=10) as conn: server_version = conn.execute("select version()").fetchall()[0][0] except Exception as ex: server_version = f"unknown ({ex})" @@ -71,8 +71,8 @@ def pytest_collection_modifyitems(items): def pytest_runtest_setup(item): # Note: not using Capabilities.has_pipeline() to allow running the tests - # with Psycopg 3.1. - if not psycopg.Pipeline.is_supported(): + # with gaussdb 3.1.
+ if not gaussdb.Pipeline.is_supported(): for m in item.iter_markers(name="pipeline"): pytest.skip("pipeline mode not supported") @@ -167,7 +167,7 @@ def maybe_trace(pgconn, tracefile, function): pgconn.trace(tracefile.fileno()) try: pgconn.set_trace_flags(pq.Trace.SUPPRESS_TIMESTAMPS | pq.Trace.REGRESS_MODE) - except psycopg.NotSupportedError: + except gaussdb.NotSupportedError: pass try: yield None @@ -180,9 +180,9 @@ def pgconn_debug(request): if not request.config.getoption("--pq-debug"): return if pq.__impl__ != "python": - raise pytest.UsageError("set PSYCOPG_IMPL=python to use --pq-debug") + raise pytest.UsageError("set GAUSSDB_IMPL=python to use --pq-debug") logging.basicConfig(level=logging.INFO, format="%(message)s") - logger = logging.getLogger("psycopg.debug") + logger = logging.getLogger("gaussdb.debug") logger.setLevel(logging.INFO) pq.PGconn = PGconnDebug @@ -216,7 +216,7 @@ def conn(conn_cls, dsn, request, tracefile): @pytest.fixture(params=[True, False], ids=["pipeline=on", "pipeline=off"]) def pipeline(request, conn): if request.param: - if not psycopg.Pipeline.is_supported(): + if not gaussdb.Pipeline.is_supported(): pytest.skip("pipeline mode not supported") with conn.pipeline() as p: yield p @@ -239,7 +239,7 @@ async def aconn(dsn, aconn_cls, request, tracefile): @pytest.fixture(params=[True, False], ids=["pipeline=on", "pipeline=off"]) async def apipeline(request, aconn): if request.param: - if not psycopg.Pipeline.is_supported(): + if not gaussdb.Pipeline.is_supported(): pytest.skip("pipeline mode not supported") async with aconn.pipeline() as p: yield p @@ -250,9 +250,9 @@ async def apipeline(request, aconn): @pytest.fixture(scope="session") def conn_cls(session_dsn): - cls = psycopg.Connection + cls = gaussdb.Connection if crdb_version: - from psycopg.crdb import CrdbConnection + from gaussdb.crdb import CrdbConnection cls = CrdbConnection @@ -261,9 +261,9 @@ def conn_cls(session_dsn): @pytest.fixture(scope="session") def aconn_cls(session_dsn, anyio_backend): - cls = psycopg.AsyncConnection + cls = gaussdb.AsyncConnection if crdb_version: - from psycopg.crdb import AsyncCrdbConnection + from gaussdb.crdb import AsyncCrdbConnection cls = AsyncCrdbConnection @@ -347,7 +347,7 @@ def hstore(svcconn): try: with svcconn.transaction(): svcconn.execute("create extension if not exists hstore") - except psycopg.Error as e: + except gaussdb.Error as e: pytest.skip(str(e)) @@ -364,7 +364,7 @@ def warm_up_database(dsn: str) -> None: global pg_version, crdb_version try: - with psycopg.connect(dsn, connect_timeout=10) as conn: + with gaussdb.connect(dsn, connect_timeout=10) as conn: conn.execute("select 1") pg_version = conn.info.server_version @@ -372,7 +372,7 @@ def warm_up_database(dsn: str) -> None: crdb_version = None param = conn.info.parameter_status("crdb_version") if param: - from psycopg.crdb import CrdbConnectionInfo + from gaussdb.crdb import CrdbConnectionInfo crdb_version = CrdbConnectionInfo.parse_crdb_version(param) except Exception as exc: diff --git a/tests/fix_faker.py b/tests/fix_faker.py index 7f653b304..33097e86e 100644 --- a/tests/fix_faker.py +++ b/tests/fix_faker.py @@ -13,13 +13,13 @@ import pytest -import psycopg -from psycopg import sql -from psycopg.adapt import PyFormat -from psycopg.types.json import Json, Jsonb -from psycopg.types.range import Range -from psycopg.types.numeric import Int4, Int8 -from psycopg.types.multirange import Multirange +import gaussdb +from gaussdb import sql +from gaussdb.adapt import PyFormat +from gaussdb.types.json 
import Json, Jsonb +from gaussdb.types.range import Range +from gaussdb.types.numeric import Int4, Int8 +from gaussdb.types.multirange import Multirange @pytest.fixture @@ -85,7 +85,7 @@ def types_names_sql(self): return self._types_names record = self.make_record(nulls=0) - tx = psycopg.adapt.Transformer(self.conn) + tx = gaussdb.adapt.Transformer(self.conn) types = [ self._get_type_name(tx, schema, value) for schema, value in zip(self.schema, record) @@ -147,8 +147,8 @@ def find_insert_problem(self, conn): try: with conn.transaction(): yield - except psycopg.DatabaseError: - cur = psycopg.Cursor(conn) + except gaussdb.DatabaseError: + cur = gaussdb.Cursor(conn) # Repeat insert one field at time, until finding the wrong one cur.execute(self.drop_stmt) cur.execute(self.create_stmt) @@ -156,7 +156,7 @@ def find_insert_problem(self, conn): for j, val in enumerate(rec): try: cur.execute(self._insert_field_stmt(j), (val,)) - except psycopg.DatabaseError as e: + except gaussdb.DatabaseError as e: r = repr(val) if len(r) > 200: r = f"{r[:200]}... ({len(r)} chars)" @@ -172,8 +172,8 @@ async def find_insert_problem_async(self, aconn): try: async with aconn.transaction(): yield - except psycopg.DatabaseError: - acur = psycopg.AsyncCursor(aconn) + except gaussdb.DatabaseError: + acur = gaussdb.AsyncCursor(aconn) # Repeat insert one field at time, until finding the wrong one await acur.execute(self.drop_stmt) await acur.execute(self.create_stmt) @@ -181,7 +181,7 @@ async def find_insert_problem_async(self, aconn): for j, val in enumerate(rec): try: await acur.execute(self._insert_field_stmt(j), (val,)) - except psycopg.DatabaseError as e: + except gaussdb.DatabaseError as e: r = repr(val) if len(r) > 200: r = f"{r[:200]}... ({len(r)} chars)" diff --git a/tests/fix_psycopg.py b/tests/fix_gaussdb.py similarity index 89% rename from tests/fix_psycopg.py rename to tests/fix_gaussdb.py index f7934603e..e9952e478 100644 --- a/tests/fix_psycopg.py +++ b/tests/fix_gaussdb.py @@ -6,7 +6,7 @@ @pytest.fixture def global_adapters(): """Restore the global adapters after a test has changed them.""" - from psycopg import adapters + from gaussdb import adapters dumpers = deepcopy(adapters._dumpers) dumpers_by_oid = deepcopy(adapters._dumpers_by_oid) @@ -52,7 +52,7 @@ def check_tpc(self): def clear_test_xacts(self): """Rollback all the prepared transaction in the testing db.""" - from psycopg import sql + from gaussdb import sql cur = self.conn.execute( "select gid from pg_prepared_xacts where database = %s", @@ -84,14 +84,14 @@ def count_test_records(self): @pytest.fixture(scope="module") def generators(): - """Return the 'generators' module for selected psycopg implementation.""" - from psycopg import pq + """Return the 'generators' module for selected gaussdb implementation.""" + from gaussdb import pq if pq.__impl__ == "c": - from psycopg._cmodule import _psycopg + from gaussdb._cmodule import _gaussdb - return _psycopg + return _gaussdb else: - import psycopg.generators + import gaussdb.generators - return psycopg.generators + return gaussdb.generators diff --git a/tests/fix_pq.py b/tests/fix_pq.py index 8cc1f8a88..e7b5c0e4b 100644 --- a/tests/fix_pq.py +++ b/tests/fix_pq.py @@ -12,14 +12,14 @@ from .utils import check_libpq_version try: - from psycopg import pq + from gaussdb import pq except ImportError: pq = None # type: ignore def pytest_report_header(config): try: - from psycopg import pq + from gaussdb import pq except ImportError: return [] @@ -51,7 +51,7 @@ def pytest_runtest_setup(item): def libpq(): 
"""Return a ctypes wrapper to access the libpq.""" try: - from psycopg.pq.misc import find_libpq_full_path + from gaussdb.pq.misc import find_libpq_full_path # Not available when testing the binary package libname = find_libpq_full_path() diff --git a/tests/fix_proxy.py b/tests/fix_proxy.py index dbc03a9f4..e1cefb478 100644 --- a/tests/fix_proxy.py +++ b/tests/fix_proxy.py @@ -8,8 +8,8 @@ import pytest -import psycopg -from psycopg import conninfo +import gaussdb +from gaussdb import conninfo def pytest_collection_modifyitems(items): @@ -86,7 +86,7 @@ def start(self): # verify that the proxy works try: - with psycopg.connect(self.client_dsn): + with gaussdb.connect(self.client_dsn): pass except Exception as e: pytest.fail(f"failed to create a working proxy: {e}") diff --git a/tests/pool/fix_pool.py b/tests/pool/fix_pool.py index 12e4f3941..c5aecfa6e 100644 --- a/tests/pool/fix_pool.py +++ b/tests/pool/fix_pool.py @@ -2,7 +2,7 @@ def pytest_configure(config): - config.addinivalue_line("markers", "pool: test related to the psycopg_pool package") + config.addinivalue_line("markers", "pool: test related to the gaussdb_pool package") def pytest_collection_modifyitems(items): diff --git a/tests/pool/test_module.py b/tests/pool/test_module.py index 31f77aab3..84d9a65fd 100644 --- a/tests/pool/test_module.py +++ b/tests/pool/test_module.py @@ -1,7 +1,7 @@ def test_version(mypy): cp = mypy.run_on_source( """\ -from psycopg_pool import __version__ +from gaussdb_pool import __version__ assert __version__ """ ) diff --git a/tests/pool/test_pool.py b/tests/pool/test_pool.py index 3248bcaf7..6f55a353b 100644 --- a/tests/pool/test_pool.py +++ b/tests/pool/test_pool.py @@ -11,16 +11,16 @@ import pytest -import psycopg -from psycopg.pq import TransactionStatus -from psycopg.rows import Row, TupleRow, class_row +import gaussdb +from gaussdb.pq import TransactionStatus +from gaussdb.rows import Row, TupleRow, class_row from ..utils import assert_type, set_autocommit from ..acompat import Event, gather, skip_sync, sleep, spawn from .test_pool_common import delay_connection try: - import psycopg_pool as pool + import gaussdb_pool as pool except ImportError: # Tests should have been skipped if the package is not available pass @@ -53,10 +53,10 @@ class MyRow(dict[str, Any]): def test_generic_connection_type(dsn): - def configure(conn: psycopg.Connection[Any]) -> None: + def configure(conn: gaussdb.Connection[Any]) -> None: set_autocommit(conn, True) - class MyConnection(psycopg.Connection[Row]): + class MyConnection(gaussdb.Connection[Row]): pass with pool.ConnectionPool( @@ -86,10 +86,10 @@ class MyConnection(psycopg.Connection[Row]): def test_non_generic_connection_type(dsn): - def configure(conn: psycopg.Connection[Any]) -> None: + def configure(conn: gaussdb.Connection[Any]) -> None: set_autocommit(conn, True) - class MyConnection(psycopg.Connection[MyRow]): + class MyConnection(gaussdb.Connection[MyRow]): def __init__(self, *args: Any, **kwargs: Any): kwargs["row_factory"] = class_row(MyRow) @@ -233,7 +233,7 @@ def reset(conn): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") def test_reset_badstate(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") def reset(conn): conn.execute("reset all") @@ -256,7 +256,7 @@ def reset(conn): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") def test_reset_broken(dsn, caplog): - caplog.set_level(logging.WARNING, 
logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") def reset(conn): with conn.transaction(): @@ -280,7 +280,7 @@ def reset(conn): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") def test_intrans_rollback(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") with pool.ConnectionPool(dsn, min_size=1) as p: conn = p.getconn() @@ -303,12 +303,12 @@ def test_intrans_rollback(dsn, caplog): @pytest.mark.crdb_skip("backend pid") def test_inerror_rollback(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") with pool.ConnectionPool(dsn, min_size=1) as p: conn = p.getconn() pid = conn.info.backend_pid - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): conn.execute("wat") assert conn.info.transaction_status == TransactionStatus.INERROR p.putconn(conn) @@ -326,7 +326,7 @@ def test_inerror_rollback(dsn, caplog): @pytest.mark.crdb_skip("backend pid") @pytest.mark.crdb_skip("copy") def test_active_close(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") with pool.ConnectionPool(dsn, min_size=1) as p: conn = p.getconn() @@ -348,7 +348,7 @@ def test_active_close(dsn, caplog): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") def test_fail_rollback_close(dsn, caplog, monkeypatch): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") with pool.ConnectionPool(dsn, min_size=1) as p: conn = p.getconn() @@ -362,7 +362,7 @@ def bad_rollback(): monkeypatch.setattr(conn, "rollback", bad_rollback) pid = conn.info.backend_pid - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): conn.execute("wat") assert conn.info.transaction_status == TransactionStatus.INERROR p.putconn(conn) @@ -461,7 +461,7 @@ def worker(n): @pytest.mark.slow @pytest.mark.timing def test_shrink(dsn, monkeypatch): - from psycopg_pool.pool import ShrinkPool + from gaussdb_pool.pool import ShrinkPool results: list[tuple[int, int]] = [] @@ -493,7 +493,7 @@ def worker(n): @pytest.mark.slow @pytest.mark.timing def test_reconnect(proxy, caplog, monkeypatch): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") assert pool.base.AttemptWithBackoff.INITIAL_DELAY == 1.0 assert pool.base.AttemptWithBackoff.DELAY_JITTER == 0.1 @@ -506,7 +506,7 @@ def test_reconnect(proxy, caplog, monkeypatch): p.wait(2.0) proxy.stop() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): with p.connection() as conn: conn.execute("select 1") @@ -560,7 +560,7 @@ def failed(pool): p.wait(2.0) proxy.stop() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): with p.connection() as conn: conn.execute("select 1") @@ -723,7 +723,7 @@ def test_max_lifetime(dsn): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") def test_check(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") with pool.ConnectionPool(dsn, min_size=4) as p: p.wait(1.0) with p.connection() as conn: @@ -762,7 +762,7 @@ def test_connect_no_check(dsn): pid2 = conn2.info.backend_pid 
conn.execute("select pg_terminate_backend(%s)", [pid2]) - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): with p.connection() as conn: conn.execute("select 1") with p.connection() as conn2: @@ -774,7 +774,7 @@ def test_connect_no_check(dsn): @pytest.mark.crdb_skip("pg_terminate_backend") @pytest.mark.parametrize("autocommit", [True, False]) def test_connect_check(dsn, caplog, autocommit): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") with pool.ConnectionPool( dsn, @@ -808,7 +808,7 @@ def test_connect_check(dsn, caplog, autocommit): @pytest.mark.gaussdb_skip("pg_terminate_backend") @pytest.mark.opengauss_skip("pg_terminate_backend") def test_getconn_check(dsn, caplog, autocommit): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") with pool.ConnectionPool( dsn, @@ -914,7 +914,7 @@ def test_stats_check(dsn): with p.connection() as conn: pid = conn.info.backend_pid - with psycopg.Connection.connect(dsn) as conn: + with gaussdb.Connection.connect(dsn) as conn: conn.execute("select pg_terminate_backend(%s)", [pid]) with p.connection() as conn: @@ -945,8 +945,8 @@ def worker(): def test_debug_deadlock(dsn): - # https://github.com/psycopg/psycopg/issues/230 - logger = logging.getLogger("psycopg") + # https://github.com/gaussdb/gaussdb/issues/230 + logger = logging.getLogger("gaussdb") handler = logging.StreamHandler() old_level = logger.level logger.setLevel(logging.DEBUG) @@ -962,7 +962,7 @@ def test_debug_deadlock(dsn): @skip_sync def test_cancellation_in_queue(dsn): - # https://github.com/psycopg/psycopg/issues/509 + # https://github.com/gaussdb/gaussdb/issues/509 nconns = 3 @@ -1018,7 +1018,7 @@ def worker(i): @pytest.mark.slow @pytest.mark.timing def test_check_backoff(dsn, caplog, monkeypatch): - caplog.set_level(logging.INFO, logger="psycopg.pool") + caplog.set_level(logging.INFO, logger="gaussdb.pool") assert pool.base.AttemptWithBackoff.INITIAL_DELAY == 1.0 assert pool.base.AttemptWithBackoff.DELAY_JITTER == 0.1 diff --git a/tests/pool/test_pool_async.py b/tests/pool/test_pool_async.py index 9ca1473b7..f5c1c7ed3 100644 --- a/tests/pool/test_pool_async.py +++ b/tests/pool/test_pool_async.py @@ -8,16 +8,16 @@ import pytest -import psycopg -from psycopg.pq import TransactionStatus -from psycopg.rows import Row, TupleRow, class_row +import gaussdb +from gaussdb.pq import TransactionStatus +from gaussdb.rows import Row, TupleRow, class_row from ..utils import assert_type, set_autocommit from ..acompat import AEvent, asleep, gather, skip_sync, spawn from .test_pool_common_async import delay_connection try: - import psycopg_pool as pool + import gaussdb_pool as pool except ImportError: # Tests should have been skipped if the package is not available pass @@ -49,10 +49,10 @@ class MyRow(dict[str, Any]): async def test_generic_connection_type(dsn): - async def configure(conn: psycopg.AsyncConnection[Any]) -> None: + async def configure(conn: gaussdb.AsyncConnection[Any]) -> None: await set_autocommit(conn, True) - class MyConnection(psycopg.AsyncConnection[Row]): + class MyConnection(gaussdb.AsyncConnection[Row]): pass async with pool.AsyncConnectionPool( @@ -83,10 +83,10 @@ class MyConnection(psycopg.AsyncConnection[Row]): async def test_non_generic_connection_type(dsn): - async def configure(conn: psycopg.AsyncConnection[Any]) -> None: + async def configure(conn: gaussdb.AsyncConnection[Any]) -> None: await 
set_autocommit(conn, True) - class MyConnection(psycopg.AsyncConnection[MyRow]): + class MyConnection(gaussdb.AsyncConnection[MyRow]): def __init__(self, *args: Any, **kwargs: Any): kwargs["row_factory"] = class_row(MyRow) super().__init__(*args, **kwargs) @@ -231,7 +231,7 @@ async def reset(conn): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") async def test_reset_badstate(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") async def reset(conn): await conn.execute("reset all") @@ -254,7 +254,7 @@ async def reset(conn): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") async def test_reset_broken(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") async def reset(conn): async with conn.transaction(): @@ -278,7 +278,7 @@ async def reset(conn): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") async def test_intrans_rollback(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") async with pool.AsyncConnectionPool(dsn, min_size=1) as p: conn = await p.getconn() @@ -301,12 +301,12 @@ async def test_intrans_rollback(dsn, caplog): @pytest.mark.crdb_skip("backend pid") async def test_inerror_rollback(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") async with pool.AsyncConnectionPool(dsn, min_size=1) as p: conn = await p.getconn() pid = conn.info.backend_pid - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): await conn.execute("wat") assert conn.info.transaction_status == TransactionStatus.INERROR await p.putconn(conn) @@ -324,7 +324,7 @@ async def test_inerror_rollback(dsn, caplog): @pytest.mark.crdb_skip("backend pid") @pytest.mark.crdb_skip("copy") async def test_active_close(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") async with pool.AsyncConnectionPool(dsn, min_size=1) as p: conn = await p.getconn() @@ -346,7 +346,7 @@ async def test_active_close(dsn, caplog): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") async def test_fail_rollback_close(dsn, caplog, monkeypatch): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") async with pool.AsyncConnectionPool(dsn, min_size=1) as p: conn = await p.getconn() @@ -360,7 +360,7 @@ async def bad_rollback(): monkeypatch.setattr(conn, "rollback", bad_rollback) pid = conn.info.backend_pid - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): await conn.execute("wat") assert conn.info.transaction_status == TransactionStatus.INERROR await p.putconn(conn) @@ -461,7 +461,7 @@ async def worker(n): @pytest.mark.slow @pytest.mark.timing async def test_shrink(dsn, monkeypatch): - from psycopg_pool.pool_async import ShrinkPool + from gaussdb_pool.pool_async import ShrinkPool results: list[tuple[int, int]] = [] @@ -493,7 +493,7 @@ async def worker(n): @pytest.mark.slow @pytest.mark.timing async def test_reconnect(proxy, caplog, monkeypatch): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") assert 
pool.base.AttemptWithBackoff.INITIAL_DELAY == 1.0 assert pool.base.AttemptWithBackoff.DELAY_JITTER == 0.1 @@ -506,7 +506,7 @@ async def test_reconnect(proxy, caplog, monkeypatch): await p.wait(2.0) proxy.stop() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): async with p.connection() as conn: await conn.execute("select 1") @@ -560,7 +560,7 @@ def failed(pool): await p.wait(2.0) proxy.stop() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): async with p.connection() as conn: await conn.execute("select 1") @@ -722,7 +722,7 @@ async def test_max_lifetime(dsn): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") async def test_check(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") async with pool.AsyncConnectionPool(dsn, min_size=4) as p: await p.wait(1.0) async with p.connection() as conn: @@ -761,7 +761,7 @@ async def test_connect_no_check(dsn): pid2 = conn2.info.backend_pid await conn.execute("select pg_terminate_backend(%s)", [pid2]) - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): async with p.connection() as conn: await conn.execute("select 1") async with p.connection() as conn2: @@ -773,7 +773,7 @@ async def test_connect_no_check(dsn): @pytest.mark.crdb_skip("pg_terminate_backend") @pytest.mark.parametrize("autocommit", [True, False]) async def test_connect_check(dsn, caplog, autocommit): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") async with pool.AsyncConnectionPool( dsn, @@ -807,7 +807,7 @@ async def test_connect_check(dsn, caplog, autocommit): @pytest.mark.gaussdb_skip("pg_terminate_backend") @pytest.mark.opengauss_skip("pg_terminate_backend") async def test_getconn_check(dsn, caplog, autocommit): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") async with pool.AsyncConnectionPool( dsn, @@ -913,7 +913,7 @@ async def test_stats_check(dsn): async with p.connection() as conn: pid = conn.info.backend_pid - async with await psycopg.AsyncConnection.connect(dsn) as conn: + async with await gaussdb.AsyncConnection.connect(dsn) as conn: await conn.execute("select pg_terminate_backend(%s)", [pid]) async with p.connection() as conn: @@ -944,8 +944,8 @@ async def worker(): async def test_debug_deadlock(dsn): - # https://github.com/psycopg/psycopg/issues/230 - logger = logging.getLogger("psycopg") + # https://github.com/gaussdb/gaussdb/issues/230 + logger = logging.getLogger("gaussdb") handler = logging.StreamHandler() old_level = logger.level logger.setLevel(logging.DEBUG) @@ -961,7 +961,7 @@ async def test_debug_deadlock(dsn): @skip_sync async def test_cancellation_in_queue(dsn): - # https://github.com/psycopg/psycopg/issues/509 + # https://github.com/gaussdb/gaussdb/issues/509 nconns = 3 @@ -1018,7 +1018,7 @@ async def worker(i): @pytest.mark.slow @pytest.mark.timing async def test_check_backoff(dsn, caplog, monkeypatch): - caplog.set_level(logging.INFO, logger="psycopg.pool") + caplog.set_level(logging.INFO, logger="gaussdb.pool") assert pool.base.AttemptWithBackoff.INITIAL_DELAY == 1.0 assert pool.base.AttemptWithBackoff.DELAY_JITTER == 0.1 diff --git a/tests/pool/test_pool_async_noasyncio.py b/tests/pool/test_pool_async_noasyncio.py index dd79d9741..7077d6039 100644 --- 
a/tests/pool/test_pool_async_noasyncio.py +++ b/tests/pool/test_pool_async_noasyncio.py @@ -7,7 +7,7 @@ import pytest try: - import psycopg_pool as pool + import gaussdb_pool as pool except ImportError: # Tests should have been skipped if the package is not available pass diff --git a/tests/pool/test_pool_common.py b/tests/pool/test_pool_common.py index 789d93b0a..78242757a 100644 --- a/tests/pool/test_pool_common.py +++ b/tests/pool/test_pool_common.py @@ -9,13 +9,13 @@ import pytest -import psycopg +import gaussdb from ..utils import set_autocommit from ..acompat import Event, gather, is_alive, skip_async, skip_sync, sleep, spawn try: - import psycopg_pool as pool + import gaussdb_pool as pool except ImportError: # Tests should have been skipped if the package is not available pass @@ -38,7 +38,7 @@ def test_defaults(pool_cls, dsn): def test_connection_class(pool_cls, dsn): - class MyConn(psycopg.Connection[Any]): + class MyConn(gaussdb.Connection[Any]): pass with pool_cls(dsn, connection_class=MyConn, min_size=min_size(pool_cls)) as p: @@ -129,7 +129,7 @@ def test_setup_no_timeout(pool_cls, dsn, proxy): @pytest.mark.slow def test_configure_badstate(pool_cls, dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") def configure(conn): conn.execute("select 1") @@ -144,7 +144,7 @@ def configure(conn): @pytest.mark.slow def test_configure_broken(pool_cls, dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") def configure(conn): with conn.transaction(): @@ -499,7 +499,7 @@ def test_reopen(pool_cls, dsn): assert p._sched_runner is None assert not p._workers - with pytest.raises(psycopg.OperationalError, match="cannot be reused"): + with pytest.raises(gaussdb.OperationalError, match="cannot be reused"): p.open() @@ -586,8 +586,8 @@ def worker(n): def test_debug_deadlock(pool_cls, dsn): - # https://github.com/psycopg/psycopg/issues/230 - logger = logging.getLogger("psycopg") + # https://github.com/gaussdb/gaussdb/issues/230 + logger = logging.getLogger("gaussdb") handler = logging.StreamHandler() old_level = logger.level logger.setLevel(logging.DEBUG) @@ -609,12 +609,12 @@ def test_check_connection(pool_cls, conn_cls, dsn, autocommit): set_autocommit(conn, autocommit) pool_cls.check_connection(conn) assert not conn.closed - assert conn.info.transaction_status == psycopg.pq.TransactionStatus.IDLE + assert conn.info.transaction_status == gaussdb.pq.TransactionStatus.IDLE with conn_cls.connect(dsn) as conn2: conn2.execute("select pg_terminate_backend(%s)", [conn.info.backend_pid]) - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pool_cls.check_connection(conn) assert conn.closed @@ -651,7 +651,7 @@ def check(conn): @skip_sync def test_cancellation_in_queue(pool_cls, dsn): - # https://github.com/psycopg/psycopg/issues/509 + # https://github.com/gaussdb/gaussdb/issues/509 nconns = 3 @@ -728,8 +728,8 @@ def connect_delay(*args, **kwargs): sleep(max(0, sec - (t1 - t0))) return rv - connect_orig = psycopg.Connection.connect - monkeypatch.setattr(psycopg.Connection, "connect", connect_delay) + connect_orig = gaussdb.Connection.connect + monkeypatch.setattr(gaussdb.Connection, "connect", connect_delay) def ensure_waiting(p, num=1): diff --git a/tests/pool/test_pool_common_async.py b/tests/pool/test_pool_common_async.py index f766e6f60..2860b945b 100644 --- a/tests/pool/test_pool_common_async.py +++ 
b/tests/pool/test_pool_common_async.py @@ -6,13 +6,13 @@ import pytest -import psycopg +import gaussdb from ..utils import set_autocommit from ..acompat import AEvent, asleep, gather, is_alive, skip_async, skip_sync, spawn try: - import psycopg_pool as pool + import gaussdb_pool as pool except ImportError: # Tests should have been skipped if the package is not available pass @@ -37,7 +37,7 @@ async def test_defaults(pool_cls, dsn): async def test_connection_class(pool_cls, dsn): - class MyConn(psycopg.AsyncConnection[Any]): + class MyConn(gaussdb.AsyncConnection[Any]): pass async with pool_cls(dsn, connection_class=MyConn, min_size=min_size(pool_cls)) as p: @@ -140,7 +140,7 @@ async def test_setup_no_timeout(pool_cls, dsn, proxy): @pytest.mark.slow async def test_configure_badstate(pool_cls, dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") async def configure(conn): await conn.execute("select 1") @@ -155,7 +155,7 @@ async def configure(conn): @pytest.mark.slow async def test_configure_broken(pool_cls, dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") async def configure(conn): async with conn.transaction(): @@ -508,7 +508,7 @@ async def test_reopen(pool_cls, dsn): assert p._sched_runner is None assert not p._workers - with pytest.raises(psycopg.OperationalError, match="cannot be reused"): + with pytest.raises(gaussdb.OperationalError, match="cannot be reused"): await p.open() @@ -593,8 +593,8 @@ async def worker(n): async def test_debug_deadlock(pool_cls, dsn): - # https://github.com/psycopg/psycopg/issues/230 - logger = logging.getLogger("psycopg") + # https://github.com/gaussdb/gaussdb/issues/230 + logger = logging.getLogger("gaussdb") handler = logging.StreamHandler() old_level = logger.level logger.setLevel(logging.DEBUG) @@ -616,12 +616,12 @@ async def test_check_connection(pool_cls, aconn_cls, dsn, autocommit): await set_autocommit(conn, autocommit) await pool_cls.check_connection(conn) assert not conn.closed - assert conn.info.transaction_status == psycopg.pq.TransactionStatus.IDLE + assert conn.info.transaction_status == gaussdb.pq.TransactionStatus.IDLE async with await aconn_cls.connect(dsn) as conn2: await conn2.execute("select pg_terminate_backend(%s)", [conn.info.backend_pid]) - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): await pool_cls.check_connection(conn) assert conn.closed @@ -657,7 +657,7 @@ async def check(conn): @skip_sync async def test_cancellation_in_queue(pool_cls, dsn): - # https://github.com/psycopg/psycopg/issues/509 + # https://github.com/gaussdb/gaussdb/issues/509 nconns = 3 @@ -735,8 +735,8 @@ async def connect_delay(*args, **kwargs): await asleep(max(0, sec - (t1 - t0))) return rv - connect_orig = psycopg.AsyncConnection.connect - monkeypatch.setattr(psycopg.AsyncConnection, "connect", connect_delay) + connect_orig = gaussdb.AsyncConnection.connect + monkeypatch.setattr(gaussdb.AsyncConnection, "connect", connect_delay) async def ensure_waiting(p, num=1): diff --git a/tests/pool/test_pool_null.py b/tests/pool/test_pool_null.py index dca8b3d6a..ff8a32bbe 100644 --- a/tests/pool/test_pool_null.py +++ b/tests/pool/test_pool_null.py @@ -9,16 +9,16 @@ import pytest from packaging.version import parse as ver # noqa: F401 # used in skipif -import psycopg -from psycopg.pq import TransactionStatus -from psycopg.rows import Row, TupleRow, class_row +import 
gaussdb +from gaussdb.pq import TransactionStatus +from gaussdb.rows import Row, TupleRow, class_row from ..utils import assert_type, set_autocommit from ..acompat import Event, gather, skip_sync, sleep, spawn from .test_pool_common import delay_connection, ensure_waiting try: - import psycopg_pool as pool + import gaussdb_pool as pool except ImportError: # Tests should have been skipped if the package is not available pass @@ -50,10 +50,10 @@ class MyRow(dict[str, Any]): def test_generic_connection_type(dsn): - def configure(conn: psycopg.Connection[Any]) -> None: + def configure(conn: gaussdb.Connection[Any]) -> None: set_autocommit(conn, True) - class MyConnection(psycopg.Connection[Row]): + class MyConnection(gaussdb.Connection[Row]): pass with pool.NullConnectionPool( @@ -83,10 +83,10 @@ class MyConnection(psycopg.Connection[Row]): def test_non_generic_connection_type(dsn): - def configure(conn: psycopg.Connection[Any]) -> None: + def configure(conn: gaussdb.Connection[Any]) -> None: set_autocommit(conn, True) - class MyConnection(psycopg.Connection[MyRow]): + class MyConnection(gaussdb.Connection[MyRow]): def __init__(self, *args: Any, **kwargs: Any): kwargs["row_factory"] = class_row(MyRow) @@ -203,7 +203,7 @@ def worker(): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") def test_reset_badstate(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") def reset(conn): conn.execute("reset all") @@ -234,7 +234,7 @@ def worker(): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") def test_reset_broken(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") def reset(conn): with conn.transaction(): @@ -263,7 +263,7 @@ def worker(): @pytest.mark.slow -@pytest.mark.skipif("ver(psycopg.__version__) < ver('3.0.8')") +@pytest.mark.skipif("ver(gaussdb.__version__) < ver('3.0.8')") def test_no_queue_timeout(proxy): with pool.NullConnectionPool( kwargs={"host": proxy.client_host, "port": proxy.client_port} @@ -275,7 +275,7 @@ def test_no_queue_timeout(proxy): @pytest.mark.crdb_skip("backend pid") def test_intrans_rollback(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") pids = [] def worker(): @@ -308,7 +308,7 @@ def worker(): @pytest.mark.crdb_skip("backend pid") def test_inerror_rollback(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") pids = [] def worker(): @@ -325,7 +325,7 @@ def worker(): ensure_waiting(p) pids.append(conn.info.backend_pid) - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): conn.execute("wat") assert conn.info.transaction_status == TransactionStatus.INERROR p.putconn(conn) @@ -341,7 +341,7 @@ def worker(): @pytest.mark.crdb_skip("backend pid") @pytest.mark.crdb_skip("copy") def test_active_close(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") pids = [] def worker(): @@ -371,7 +371,7 @@ def worker(): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") def test_fail_rollback_close(dsn, caplog, monkeypatch): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") pids = [] def worker(): 
@@ -393,7 +393,7 @@ def bad_rollback(): monkeypatch.setattr(conn, "rollback", bad_rollback) pids.append(conn.info.backend_pid) - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): conn.execute("wat") assert conn.info.transaction_status == TransactionStatus.INERROR p.putconn(conn) @@ -461,7 +461,7 @@ def test_stats_connect(dsn, proxy, monkeypatch): @skip_sync def test_cancellation_in_queue(dsn): - # https://github.com/psycopg/psycopg/issues/509 + # https://github.com/gaussdb/gaussdb/issues/509 nconns = 3 diff --git a/tests/pool/test_pool_null_async.py b/tests/pool/test_pool_null_async.py index bde7612b3..17efe495d 100644 --- a/tests/pool/test_pool_null_async.py +++ b/tests/pool/test_pool_null_async.py @@ -6,16 +6,16 @@ import pytest from packaging.version import parse as ver # noqa: F401 # used in skipif -import psycopg -from psycopg.pq import TransactionStatus -from psycopg.rows import Row, TupleRow, class_row +import gaussdb +from gaussdb.pq import TransactionStatus +from gaussdb.rows import Row, TupleRow, class_row from ..utils import assert_type, set_autocommit from ..acompat import AEvent, asleep, gather, skip_sync, spawn from .test_pool_common_async import delay_connection, ensure_waiting try: - import psycopg_pool as pool + import gaussdb_pool as pool except ImportError: # Tests should have been skipped if the package is not available pass @@ -46,10 +46,10 @@ class MyRow(dict[str, Any]): async def test_generic_connection_type(dsn): - async def configure(conn: psycopg.AsyncConnection[Any]) -> None: + async def configure(conn: gaussdb.AsyncConnection[Any]) -> None: await set_autocommit(conn, True) - class MyConnection(psycopg.AsyncConnection[Row]): + class MyConnection(gaussdb.AsyncConnection[Row]): pass async with pool.AsyncNullConnectionPool( @@ -80,10 +80,10 @@ class MyConnection(psycopg.AsyncConnection[Row]): async def test_non_generic_connection_type(dsn): - async def configure(conn: psycopg.AsyncConnection[Any]) -> None: + async def configure(conn: gaussdb.AsyncConnection[Any]) -> None: await set_autocommit(conn, True) - class MyConnection(psycopg.AsyncConnection[MyRow]): + class MyConnection(gaussdb.AsyncConnection[MyRow]): def __init__(self, *args: Any, **kwargs: Any): kwargs["row_factory"] = class_row(MyRow) super().__init__(*args, **kwargs) @@ -199,7 +199,7 @@ async def worker(): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") async def test_reset_badstate(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") async def reset(conn): await conn.execute("reset all") @@ -230,7 +230,7 @@ async def worker(): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") async def test_reset_broken(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") async def reset(conn): async with conn.transaction(): @@ -259,7 +259,7 @@ async def worker(): @pytest.mark.slow -@pytest.mark.skipif("ver(psycopg.__version__) < ver('3.0.8')") +@pytest.mark.skipif("ver(gaussdb.__version__) < ver('3.0.8')") async def test_no_queue_timeout(proxy): async with pool.AsyncNullConnectionPool( kwargs={"host": proxy.client_host, "port": proxy.client_port} @@ -271,7 +271,7 @@ async def test_no_queue_timeout(proxy): @pytest.mark.crdb_skip("backend pid") async def test_intrans_rollback(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + 
caplog.set_level(logging.WARNING, logger="gaussdb.pool") pids = [] async def worker(): @@ -304,7 +304,7 @@ async def worker(): @pytest.mark.crdb_skip("backend pid") async def test_inerror_rollback(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") pids = [] async def worker(): @@ -321,7 +321,7 @@ async def worker(): await ensure_waiting(p) pids.append(conn.info.backend_pid) - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): await conn.execute("wat") assert conn.info.transaction_status == TransactionStatus.INERROR await p.putconn(conn) @@ -337,7 +337,7 @@ async def worker(): @pytest.mark.crdb_skip("backend pid") @pytest.mark.crdb_skip("copy") async def test_active_close(dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") pids = [] async def worker(): @@ -367,7 +367,7 @@ async def worker(): @pytest.mark.opengauss_skip("backend pid") @pytest.mark.crdb_skip("backend pid") async def test_fail_rollback_close(dsn, caplog, monkeypatch): - caplog.set_level(logging.WARNING, logger="psycopg.pool") + caplog.set_level(logging.WARNING, logger="gaussdb.pool") pids = [] async def worker(): @@ -389,7 +389,7 @@ async def bad_rollback(): monkeypatch.setattr(conn, "rollback", bad_rollback) pids.append(conn.info.backend_pid) - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): await conn.execute("wat") assert conn.info.transaction_status == TransactionStatus.INERROR await p.putconn(conn) @@ -457,7 +457,7 @@ async def test_stats_connect(dsn, proxy, monkeypatch): @skip_sync async def test_cancellation_in_queue(dsn): - # https://github.com/psycopg/psycopg/issues/509 + # https://github.com/gaussdb/gaussdb/issues/509 nconns = 3 diff --git a/tests/pool/test_sched.py b/tests/pool/test_sched.py index f579801a0..1fdf10f3a 100644 --- a/tests/pool/test_sched.py +++ b/tests/pool/test_sched.py @@ -11,7 +11,7 @@ from ..acompat import gather, sleep, spawn try: - from psycopg_pool.sched import Scheduler + from gaussdb_pool.sched import Scheduler except ImportError: # Tests should have been skipped if the package is not available pass @@ -69,7 +69,7 @@ def worker(i): @pytest.mark.slow def test_sched_error(caplog): - caplog.set_level(logging.WARNING, logger="psycopg") + caplog.set_level(logging.WARNING, logger="gaussdb") s = Scheduler() t = spawn(s.run) diff --git a/tests/pool/test_sched_async.py b/tests/pool/test_sched_async.py index 1add98ccd..9ad81359a 100644 --- a/tests/pool/test_sched_async.py +++ b/tests/pool/test_sched_async.py @@ -8,7 +8,7 @@ from ..acompat import asleep, gather, spawn try: - from psycopg_pool.sched_async import AsyncScheduler + from gaussdb_pool.sched_async import AsyncScheduler except ImportError: # Tests should have been skipped if the package is not available pass @@ -68,7 +68,7 @@ async def worker(i): @pytest.mark.slow async def test_sched_error(caplog): - caplog.set_level(logging.WARNING, logger="psycopg") + caplog.set_level(logging.WARNING, logger="gaussdb") s = AsyncScheduler() t = spawn(s.run) diff --git a/tests/pq/test_async.py b/tests/pq/test_async.py index aff6ccdf9..c0eb8b182 100644 --- a/tests/pq/test_async.py +++ b/tests/pq/test_async.py @@ -2,13 +2,13 @@ import pytest -import psycopg -from psycopg import pq -from psycopg.generators import execute +import gaussdb +from gaussdb import pq +from gaussdb.generators import execute def 
execute_wait(pgconn): - return psycopg.waiting.wait(execute(pgconn), pgconn.socket) + return gaussdb.waiting.wait(execute(pgconn), pgconn.socket) def test_send_query(pgconn): @@ -64,7 +64,7 @@ def test_send_query(pgconn): def test_send_query_compact_test(pgconn): - # Like the above test but use psycopg facilities for compactness + # Like the above test but use gaussdb facilities for compactness pgconn.send_query( b"/* %s */ select 'x' as f from pg_sleep(0.01); select 1 as foo;" % (b"x" * 1_000_000) @@ -80,7 +80,7 @@ def test_send_query_compact_test(pgconn): assert results[1].get_value(0, 0) == b"1" pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.send_query(b"select 1") @@ -141,7 +141,7 @@ def test_send_query_params(pgconn): assert res.get_value(0, 0) == b"8" pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.send_query_params(b"select $1", [b"1"]) @@ -155,9 +155,9 @@ def test_send_prepare(pgconn): assert res.get_value(0, 0) == b"8" pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.send_prepare(b"prep", b"select $1::int + $2::int") - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.send_query_prepared(b"prep", [b"3", b"5"]) @@ -213,7 +213,7 @@ def test_send_describe_prepared(pgconn): assert res.ftype(0) == 20 pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.send_describe_prepared(b"prep") @@ -235,7 +235,7 @@ def test_send_close_prepared(pgconn): @pytest.mark.libpq("< 17") def test_send_close_prepared_no_close(pgconn): - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): pgconn.send_close_prepared(b"prep") @@ -256,7 +256,7 @@ def test_send_describe_portal(pgconn): assert res.fname(0) == b"foo" pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.send_describe_portal(b"cur") @@ -283,5 +283,5 @@ def test_send_close_portal(pgconn): @pytest.mark.libpq("< 17") def test_send_close_portal_no_close(pgconn): - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): pgconn.send_close_portal(b"cur") diff --git a/tests/pq/test_conninfo.py b/tests/pq/test_conninfo.py index 64d8b8fd6..942dc760d 100644 --- a/tests/pq/test_conninfo.py +++ b/tests/pq/test_conninfo.py @@ -1,7 +1,7 @@ import pytest -import psycopg -from psycopg import pq +import gaussdb +from gaussdb import pq def test_defaults(monkeypatch): @@ -43,6 +43,6 @@ def test_conninfo_parse_96(): def test_conninfo_parse_bad(): - with pytest.raises(psycopg.OperationalError) as e: + with pytest.raises(gaussdb.OperationalError) as e: pq.Conninfo.parse(b"bad_conninfo=") assert "bad_conninfo" in str(e.value) diff --git a/tests/pq/test_copy.py b/tests/pq/test_copy.py index 17af2a62a..6352f86e4 100644 --- a/tests/pq/test_copy.py +++ b/tests/pq/test_copy.py @@ -1,7 +1,7 @@ import pytest -import psycopg -from psycopg import pq +import gaussdb +from gaussdb import pq pytestmark = pytest.mark.crdb_skip("copy") @@ -33,20 +33,20 @@ def test_put_data_no_copy(pgconn): - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.put_copy_data(b"wat") pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with 
pytest.raises(gaussdb.OperationalError): pgconn.put_copy_data(b"wat") def test_put_end_no_copy(pgconn): - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.put_copy_end() pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.put_copy_end() @@ -139,11 +139,11 @@ def test_copy_out_error_end(pgconn): def test_get_data_no_copy(pgconn): - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.get_copy_data(0) pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.get_copy_data(0) diff --git a/tests/pq/test_escaping.py b/tests/pq/test_escaping.py index ad88d8a03..2faa91ebe 100644 --- a/tests/pq/test_escaping.py +++ b/tests/pq/test_escaping.py @@ -1,7 +1,7 @@ import pytest -import psycopg -from psycopg import pq +import gaussdb +from gaussdb import pq from ..fix_crdb import crdb_scs_off @@ -36,12 +36,12 @@ def test_escape_literal_1char(pgconn, scs): def test_escape_literal_noconn(pgconn): esc = pq.Escaping() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): esc.escape_literal(b"hi") esc = pq.Escaping(pgconn) pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): esc.escape_literal(b"hi") @@ -75,12 +75,12 @@ def test_escape_identifier_1char(pgconn, scs): def test_escape_identifier_noconn(pgconn): esc = pq.Escaping() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): esc.escape_identifier(b"hi") esc = pq.Escaping(pgconn) pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): esc.escape_identifier(b"hi") @@ -134,7 +134,7 @@ def test_escape_string_noconn(data, want): def test_escape_string_badconn(pgconn): esc = pq.Escaping(pgconn) pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): esc.escape_string(b"hi") @@ -143,7 +143,7 @@ def test_escape_string_badenc(pgconn): assert res.status == pq.ExecStatus.COMMAND_OK data = "\u20ac".encode()[:-1] esc = pq.Escaping(pgconn) - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): esc.escape_string(data) @@ -155,7 +155,7 @@ def test_escape_bytea(pgconn, data): assert rv == exp pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): esc.escape_bytea(data) @@ -184,5 +184,5 @@ def test_unescape_bytea(pgconn, data): assert rv == data pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): esc.unescape_bytea(data) diff --git a/tests/pq/test_exec.py b/tests/pq/test_exec.py index 3a7372875..f89df87ce 100644 --- a/tests/pq/test_exec.py +++ b/tests/pq/test_exec.py @@ -2,8 +2,8 @@ import pytest -import psycopg -from psycopg import pq +import gaussdb +from gaussdb import pq def test_exec_none(pgconn): @@ -15,7 +15,7 @@ def test_exec(pgconn): res = pgconn.exec_(b"select 'hel' || 'lo'") assert res.get_value(0, 0) == b"hello" pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.exec_(b"select 'hello'") @@ -24,7 +24,7 @@ def test_exec_params(pgconn): assert res.status == pq.ExecStatus.TUPLES_OK assert res.get_value(0, 0) == b"8" pgconn.finish() - with 
pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.exec_params(b"select $1::int + $2", [b"5", b"3"]) @@ -87,9 +87,9 @@ def test_prepare(pgconn): assert res.get_value(0, 0) == b"8" pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.prepare(b"prep", b"select $1::int + $2::int") - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.exec_prepared(b"prep", [b"3", b"5"]) @@ -141,7 +141,7 @@ def test_close_prepared(pgconn): @pytest.mark.libpq("< 17") def test_close_prepared_no_close(pgconn): - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): pgconn.close_prepared(b"cur") @@ -161,7 +161,7 @@ def test_describe_portal(pgconn): assert res.fname(0) == b"foo" pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.describe_portal(b"cur") @@ -186,5 +186,5 @@ def test_close_portal(pgconn): @pytest.mark.libpq("< 17") def test_close_portal_no_close(pgconn): - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): pgconn.close_portal(b"cur") diff --git a/tests/pq/test_misc.py b/tests/pq/test_misc.py index 3730e1418..215fa06bb 100644 --- a/tests/pq/test_misc.py +++ b/tests/pq/test_misc.py @@ -1,7 +1,7 @@ import pytest -import psycopg -from psycopg import pq +import gaussdb +from gaussdb import pq def test_error_message(pgconn): @@ -75,7 +75,7 @@ def test_result_set_attrs(pgconn): assert res.ftype(1) == 1700 assert res.ftype(2) == 25 - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): res.set_attributes(attrs) diff --git a/tests/pq/test_pgconn.py b/tests/pq/test_pgconn.py index dc0860275..df11f8aa4 100644 --- a/tests/pq/test_pgconn.py +++ b/tests/pq/test_pgconn.py @@ -14,12 +14,12 @@ import pytest -import psycopg -import psycopg.generators -from psycopg import pq +import gaussdb +import gaussdb.generators +from gaussdb import pq if TYPE_CHECKING: - from psycopg.pq.abc import PGcancelConn, PGconn + from gaussdb.pq.abc import PGcancelConn, PGconn def wait( @@ -51,7 +51,7 @@ def test_connectdb(dsn): def test_connectdb_error(): - conn = pq.PGconn.connect(b"dbname=psycopg_test_not_for_real") + conn = pq.PGconn.connect(b"dbname=gaussdb_test_not_for_real") assert conn.status == pq.ConnStatus.BAD @@ -66,14 +66,14 @@ def test_connect_async(dsn): conn.nonblocking = 1 wait(conn) conn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): conn.connect_poll() @pytest.mark.crdb("skip", reason="connects to any db name") def test_connect_async_bad(dsn): parsed_dsn = {e.keyword: e.val for e in pq.Conninfo.parse(dsn.encode()) if e.val} - parsed_dsn[b"dbname"] = b"psycopg_test_not_for_real" + parsed_dsn[b"dbname"] = b"gaussdb_test_not_for_real" dsn = b" ".join(b"%s='%s'" % item for item in parsed_dsn.items()) conn = pq.PGconn.connect_start(dsn) wait(conn, return_on=pq.PollingStatus.FAILED) @@ -126,7 +126,7 @@ def test_reset(pgconn): assert pgconn.status == pq.ConnStatus.OK pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.reset() assert pgconn.status == pq.ConnStatus.BAD @@ -141,10 +141,10 @@ def test_reset_async(pgconn): wait(pgconn, "reset_poll") pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): 
pgconn.reset_start() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.reset_poll() @@ -160,7 +160,7 @@ def test_password(pgconn): # not in info assert isinstance(pgconn.password, bytes) pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.password @@ -168,7 +168,7 @@ def test_host(pgconn): # might be not in info assert isinstance(pgconn.host, bytes) pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.host @@ -177,13 +177,13 @@ def test_hostaddr(pgconn): # not in info assert isinstance(pgconn.hostaddr, bytes), pgconn.hostaddr pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.hostaddr @pytest.mark.libpq("< 12") def test_hostaddr_missing(pgconn): - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): pgconn.hostaddr @@ -193,19 +193,19 @@ def test_transaction_status(pgconn): assert pgconn.transaction_status == pq.TransactionStatus.INTRANS pgconn.send_query(b"select 1") assert pgconn.transaction_status == pq.TransactionStatus.ACTIVE - psycopg.waiting.wait(psycopg.generators.execute(pgconn), pgconn.socket) + gaussdb.waiting.wait(gaussdb.generators.execute(pgconn), pgconn.socket) assert pgconn.transaction_status == pq.TransactionStatus.INTRANS pgconn.finish() assert pgconn.transaction_status == pq.TransactionStatus.UNKNOWN def test_parameter_status(dsn, monkeypatch): - monkeypatch.setenv("PGAPPNAME", "psycopg tests") + monkeypatch.setenv("PGAPPNAME", "gaussdb tests") pgconn = pq.PGconn.connect(dsn.encode()) - assert pgconn.parameter_status(b"application_name") == b"psycopg tests" + assert pgconn.parameter_status(b"application_name") == b"gaussdb tests" assert pgconn.parameter_status(b"wat") is None pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.parameter_status(b"application_name") @@ -224,14 +224,14 @@ def test_encoding(pgconn): assert pgconn.parameter_status(b"client_encoding") == b"UTF8" pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.parameter_status(b"client_encoding") def test_protocol_version(pgconn): assert pgconn.protocol_version == 3 pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.protocol_version @@ -239,7 +239,7 @@ def test_protocol_version(pgconn): def test_server_version(pgconn): assert pgconn.server_version >= "505.2.0" pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.server_version @@ -251,7 +251,7 @@ def test_socket(pgconn): # so let's see if at least an ok value comes out of it. 
try: assert pgconn.socket == socket - except psycopg.OperationalError: + except gaussdb.OperationalError: pass @@ -278,7 +278,7 @@ def test_get_error_message(pgconn): def test_backend_pid(pgconn): assert isinstance(pgconn.backend_pid, int) pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.backend_pid @@ -309,7 +309,7 @@ def test_used_password(pgconn, dsn, monkeypatch): def test_set_single_row_mode(pgconn): - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.set_single_row_mode() pgconn.send_query(b"select 1") @@ -318,7 +318,7 @@ def test_set_single_row_mode(pgconn): @pytest.mark.libpq(">= 17") def test_set_chunked_rows_mode(pgconn): - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.set_chunked_rows_mode(42) pgconn.send_query(b"select 1") @@ -413,13 +413,13 @@ def test_cancel_conn_finished(pgconn): cancel_conn = pgconn.cancel_conn() cancel_conn.reset() cancel_conn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): cancel_conn.start() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): cancel_conn.blocking() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): cancel_conn.poll() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): cancel_conn.reset() assert cancel_conn.get_error_message() == "connection pointer is NULL" @@ -430,14 +430,14 @@ def test_cancel(pgconn): cancel.cancel() pgconn.finish() cancel.cancel() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): pgconn.get_cancel() def test_cancel_free(pgconn): cancel = pgconn.get_cancel() cancel.free() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): cancel.cancel() cancel.free() @@ -471,7 +471,7 @@ def callback(res): @pytest.mark.crdb_skip("do") def test_notice_error(pgconn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg") + caplog.set_level(logging.WARNING, logger="gaussdb") def callback(res): raise Exception("hello error") @@ -495,7 +495,7 @@ def test_trace_pre14(pgconn, tmp_path): tracef = tmp_path / "trace" with tracef.open("w") as f: pgconn.trace(f.fileno()) - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): pgconn.set_trace_flags(0) pgconn.exec_(b"select 1") pgconn.untrace() @@ -527,21 +527,21 @@ def test_trace(pgconn, tmp_path): @pytest.mark.skipif("sys.platform == 'linux'") def test_trace_nonlinux(pgconn): - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): pgconn.trace(1) @pytest.mark.libpq(">= 17") def test_change_password_error(pgconn): with pytest.raises( - psycopg.OperationalError, match='role(/user)? "ashesh" does not exist' + gaussdb.OperationalError, match='role(/user)? 
"ashesh" does not exist' ): - pgconn.change_password(b"ashesh", b"psycopg") + pgconn.change_password(b"ashesh", b"gaussdb") @pytest.fixture def role(pgconn: PGconn) -> Iterator[tuple[bytes, bytes]]: - user, passwd = "ashesh", "psycopg2" + user, passwd = "ashesh", "_GaussDB" r = pgconn.exec_(f"CREATE USER {user} LOGIN PASSWORD '{passwd}'".encode()) if r.status != pq.ExecStatus.COMMAND_OK: pytest.skip(f"cannot create a PostgreSQL role: {r.get_error_message()}") @@ -562,28 +562,28 @@ def test_change_password(pgconn, dsn, role): conn = pq.PGconn.connect(b" ".join(b"%s='%s'" % item for item in conninfo.items())) assert conn.status == pq.ConnStatus.OK, conn.error_message - pgconn.change_password(user, b"psycopg") - conninfo[b"password"] = b"psycopg" + pgconn.change_password(user, b"gaussdb") + conninfo[b"password"] = b"gaussdb" conn = pq.PGconn.connect(b" ".join(b"%s='%s'" % item for item in conninfo.items())) assert conn.status == pq.ConnStatus.OK, conn.error_message @pytest.mark.libpq(">= 10") def test_encrypt_password(pgconn): - enc = pgconn.encrypt_password(b"psycopg2", b"ashesh", b"md5") + enc = pgconn.encrypt_password(b"_GaussDB", b"ashesh", b"md5") assert enc == b"md594839d658c28a357126f105b9cb14cfc" @pytest.mark.libpq(">= 10") def test_encrypt_password_scram(pgconn): - enc = pgconn.encrypt_password(b"psycopg2", b"ashesh", b"scram-sha-256") + enc = pgconn.encrypt_password(b"_GaussDB", b"ashesh", b"scram-sha-256") assert enc.startswith(b"SCRAM-SHA-256$") @pytest.mark.libpq(">= 10") def test_encrypt_password_badalgo(pgconn): - with pytest.raises(psycopg.OperationalError): - assert pgconn.encrypt_password(b"psycopg2", b"ashesh", b"wat") + with pytest.raises(gaussdb.OperationalError): + assert pgconn.encrypt_password(b"_GaussDB", b"ashesh", b"wat") @pytest.mark.libpq(">= 10") @@ -591,30 +591,30 @@ def test_encrypt_password_badalgo(pgconn): def test_encrypt_password_query(pgconn): res = pgconn.exec_(b"set password_encryption to 'md5'") assert res.status == pq.ExecStatus.COMMAND_OK, pgconn.get_error_message() - enc = pgconn.encrypt_password(b"psycopg2", b"ashesh") + enc = pgconn.encrypt_password(b"_GaussDB", b"ashesh") assert enc == b"md594839d658c28a357126f105b9cb14cfc" res = pgconn.exec_(b"set password_encryption to 'scram-sha-256'") assert res.status == pq.ExecStatus.COMMAND_OK - enc = pgconn.encrypt_password(b"psycopg2", b"ashesh") + enc = pgconn.encrypt_password(b"_GaussDB", b"ashesh") assert enc.startswith(b"SCRAM-SHA-256$") @pytest.mark.libpq(">= 10") def test_encrypt_password_closed(pgconn): pgconn.finish() - with pytest.raises(psycopg.OperationalError): - assert pgconn.encrypt_password(b"psycopg2", b"ashesh") + with pytest.raises(gaussdb.OperationalError): + assert pgconn.encrypt_password(b"_GaussDB", b"ashesh") @pytest.mark.libpq("< 10") def test_encrypt_password_not_supported(pgconn): # it might even be supported, but not worth the lifetime - with pytest.raises(psycopg.NotSupportedError): - pgconn.encrypt_password(b"psycopg2", b"ashesh", b"md5") + with pytest.raises(gaussdb.NotSupportedError): + pgconn.encrypt_password(b"_GaussDB", b"ashesh", b"md5") - with pytest.raises(psycopg.NotSupportedError): - pgconn.encrypt_password(b"psycopg2", b"ashesh", b"scram-sha-256") + with pytest.raises(gaussdb.NotSupportedError): + pgconn.encrypt_password(b"_GaussDB", b"ashesh", b"scram-sha-256") def test_str(pgconn, dsn): diff --git a/tests/pq/test_pgresult.py b/tests/pq/test_pgresult.py index 3c4267133..cdc1f6440 100644 --- a/tests/pq/test_pgresult.py +++ b/tests/pq/test_pgresult.py @@ -2,7 +2,7 @@ 
import pytest -from psycopg import pq +from gaussdb import pq @pytest.mark.parametrize( diff --git a/tests/pq/test_pipeline.py b/tests/pq/test_pipeline.py index 00cd54abe..d55cbf6ee 100644 --- a/tests/pq/test_pipeline.py +++ b/tests/pq/test_pipeline.py @@ -1,19 +1,19 @@ import pytest -import psycopg -from psycopg import pq +import gaussdb +from gaussdb import pq @pytest.mark.libpq("< 14") def test_old_libpq(pgconn): assert pgconn.pipeline_status == 0 - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): pgconn.enter_pipeline_mode() - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): pgconn.exit_pipeline_mode() - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): pgconn.pipeline_sync() - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): pgconn.send_flush_request() @@ -23,7 +23,7 @@ def test_work_in_progress(pgconn): assert pgconn.pipeline_status == pq.PipelineStatus.OFF pgconn.enter_pipeline_mode() pgconn.send_query_params(b"select $1", [b"1"]) - with pytest.raises(psycopg.OperationalError, match="cannot exit pipeline mode"): + with pytest.raises(gaussdb.OperationalError, match="cannot exit pipeline mode"): pgconn.exit_pipeline_mode() diff --git a/tests/pq/test_pq.py b/tests/pq/test_pq.py index 0b8a86c5f..d2266a54a 100644 --- a/tests/pq/test_pq.py +++ b/tests/pq/test_pq.py @@ -2,8 +2,8 @@ import pytest -import psycopg -from psycopg import pq +import gaussdb +from gaussdb import pq from ..utils import check_libpq_version @@ -18,16 +18,16 @@ def test_build_version(): assert pq.__build_version__ and pq.__build_version__ >= 70400 -@pytest.mark.skipif("not os.environ.get('PSYCOPG_TEST_WANT_LIBPQ_BUILD')") +@pytest.mark.skipif("not os.environ.get('GAUSSDB_TEST_WANT_LIBPQ_BUILD')") def test_want_built_version(): - want = os.environ["PSYCOPG_TEST_WANT_LIBPQ_BUILD"] + want = os.environ["GAUSSDB_TEST_WANT_LIBPQ_BUILD"] got = pq.__build_version__ assert not check_libpq_version(got, want) -@pytest.mark.skipif("not os.environ.get('PSYCOPG_TEST_WANT_LIBPQ_IMPORT')") +@pytest.mark.skipif("not os.environ.get('GAUSSDB_TEST_WANT_LIBPQ_IMPORT')") def test_want_import_version(): - want = os.environ["PSYCOPG_TEST_WANT_LIBPQ_IMPORT"] + want = os.environ["GAUSSDB_TEST_WANT_LIBPQ_IMPORT"] got = pq.version() assert not check_libpq_version(got, want) @@ -38,8 +38,8 @@ def test_want_import_version(): @pytest.mark.libpq(">= 14") def test_pipeline_supported(conn): - assert psycopg.Pipeline.is_supported() - assert psycopg.AsyncPipeline.is_supported() + assert gaussdb.Pipeline.is_supported() + assert gaussdb.AsyncPipeline.is_supported() with conn.pipeline(): pass @@ -47,10 +47,10 @@ def test_pipeline_supported(conn): @pytest.mark.libpq("< 14") def test_pipeline_not_supported(conn): - assert not psycopg.Pipeline.is_supported() - assert not psycopg.AsyncPipeline.is_supported() + assert not gaussdb.Pipeline.is_supported() + assert not gaussdb.AsyncPipeline.is_supported() - with pytest.raises(psycopg.NotSupportedError) as exc: + with pytest.raises(gaussdb.NotSupportedError) as exc: with conn.pipeline(): pass diff --git a/tests/scripts/bench-411.py b/tests/scripts/bench-411.py index 7ab509f80..a98eb687b 100644 --- a/tests/scripts/bench-411.py +++ b/tests/scripts/bench-411.py @@ -21,9 +21,9 @@ class Driver(str, Enum): - psycopg2 = "psycopg2" + _GaussDB = "_GaussDB" psycopg2_green = "psycopg2_green" - psycopg = "psycopg" + gaussdb = "gaussdb" 
psycopg_async = "psycopg_async" asyncpg = "asyncpg" @@ -57,24 +57,24 @@ def main() -> None: if i == len(args.drivers) - 1: args.drop = drop_at_the_end - if name == Driver.psycopg2: - import psycopg2 # type: ignore + if name == Driver._GaussDB: + import _GaussDB # type: ignore - run_psycopg2(psycopg2, args) + run_psycopg2(_GaussDB, args) elif name == Driver.psycopg2_green: - import psycopg2 - import psycopg2.extras # type: ignore + import _GaussDB + import _GaussDB.extras # type: ignore - run_psycopg2_green(psycopg2, args) + run_psycopg2_green(_GaussDB, args) - elif name == Driver.psycopg: - import psycopg + elif name == Driver.gaussdb: + import gaussdb - run_psycopg(psycopg, args) + run_psycopg(gaussdb, args) elif name == Driver.psycopg_async: - import psycopg + import gaussdb if sys.platform == "win32": if hasattr(asyncio, "WindowsSelectorEventLoopPolicy"): @@ -82,7 +82,7 @@ def main() -> None: asyncio.WindowsSelectorEventLoopPolicy() ) - asyncio.run(run_psycopg_async(psycopg, args)) + asyncio.run(run_psycopg_async(gaussdb, args)) elif name == Driver.asyncpg: import asyncpg # type: ignore @@ -132,12 +132,12 @@ def time_log(message: str) -> Generator[None]: logger.info(f"Run {message} in {end-start} s") -def run_psycopg2(psycopg2: Any, args: Namespace) -> None: - logger.info("Running psycopg2") +def run_psycopg2(_GaussDB: Any, args: Namespace) -> None: + logger.info("Running _GaussDB") if args.create: logger.info(f"inserting {args.ntests} test records") - with psycopg2.connect(args.dsn) as conn: + with _GaussDB.connect(args.dsn) as conn: with conn.cursor() as cursor: cursor.execute(drop) cursor.execute(table) @@ -147,8 +147,8 @@ def run_psycopg2(psycopg2: Any, args: Namespace) -> None: def run(i): logger.info(f"thread {i} running {args.ntests} queries") to_query = random.choices(ids, k=args.ntests) - with psycopg2.connect(args.dsn) as conn: - with time_log("psycopg2"): + with _GaussDB.connect(args.dsn) as conn: + with time_log("_GaussDB"): for id_ in to_query: with conn.cursor() as cursor: cursor.execute(select, {"id": id_}) @@ -163,20 +163,20 @@ def run(i): if args.drop: logger.info("dropping test records") - with psycopg2.connect(args.dsn) as conn: + with _GaussDB.connect(args.dsn) as conn: with conn.cursor() as cursor: cursor.execute(drop) conn.commit() -def run_psycopg2_green(psycopg2: Any, args: Namespace) -> None: +def run_psycopg2_green(_GaussDB: Any, args: Namespace) -> None: logger.info("Running psycopg2_green") - psycopg2.extensions.set_wait_callback(psycopg2.extras.wait_select) + _GaussDB.extensions.set_wait_callback(_GaussDB.extras.wait_select) if args.create: logger.info(f"inserting {args.ntests} test records") - with psycopg2.connect(args.dsn) as conn: + with _GaussDB.connect(args.dsn) as conn: with conn.cursor() as cursor: cursor.execute(drop) cursor.execute(table) @@ -186,8 +186,8 @@ def run_psycopg2_green(psycopg2: Any, args: Namespace) -> None: def run(i): logger.info(f"thread {i} running {args.ntests} queries") to_query = random.choices(ids, k=args.ntests) - with psycopg2.connect(args.dsn) as conn: - with time_log("psycopg2"): + with _GaussDB.connect(args.dsn) as conn: + with time_log("_GaussDB"): for id_ in to_query: with conn.cursor() as cursor: cursor.execute(select, {"id": id_}) @@ -202,20 +202,20 @@ def run(i): if args.drop: logger.info("dropping test records") - with psycopg2.connect(args.dsn) as conn: + with _GaussDB.connect(args.dsn) as conn: with conn.cursor() as cursor: cursor.execute(drop) conn.commit() - psycopg2.extensions.set_wait_callback(None) + 
_GaussDB.extensions.set_wait_callback(None) -def run_psycopg(psycopg: Any, args: Namespace) -> None: - logger.info("Running psycopg sync") +def run_psycopg(gaussdb: Any, args: Namespace) -> None: + logger.info("Running gaussdb sync") if args.create: logger.info(f"inserting {args.ntests} test records") - with psycopg.connect(args.dsn) as conn: + with gaussdb.connect(args.dsn) as conn: with conn.cursor() as cursor: cursor.execute(drop) cursor.execute(table) @@ -225,8 +225,8 @@ def run_psycopg(psycopg: Any, args: Namespace) -> None: def run(i): logger.info(f"thread {i} running {args.ntests} queries") to_query = random.choices(ids, k=args.ntests) - with psycopg.connect(args.dsn) as conn: - with time_log("psycopg"): + with gaussdb.connect(args.dsn) as conn: + with time_log("gaussdb"): for id_ in to_query: with conn.cursor() as cursor: cursor.execute(select, {"id": id_}) @@ -241,20 +241,20 @@ def run(i): if args.drop: logger.info("dropping test records") - with psycopg.connect(args.dsn) as conn: + with gaussdb.connect(args.dsn) as conn: with conn.cursor() as cursor: cursor.execute(drop) conn.commit() -async def run_psycopg_async(psycopg: Any, args: Namespace) -> None: - logger.info("Running psycopg async") +async def run_psycopg_async(gaussdb: Any, args: Namespace) -> None: + logger.info("Running gaussdb async") conn: Any if args.create: logger.info(f"inserting {args.ntests} test records") - async with await psycopg.AsyncConnection.connect(args.dsn) as conn: + async with await gaussdb.AsyncConnection.connect(args.dsn) as conn: async with conn.cursor() as cursor: await cursor.execute(drop) await cursor.execute(table) @@ -264,7 +264,7 @@ async def run_psycopg_async(psycopg: Any, args: Namespace) -> None: async def run(i): logger.info(f"task {i} running {args.ntests} queries") to_query = random.choices(ids, k=args.ntests) - async with await psycopg.AsyncConnection.connect(args.dsn) as conn: + async with await gaussdb.AsyncConnection.connect(args.dsn) as conn: with time_log("psycopg_async"): for id_ in to_query: cursor = await conn.execute(select, {"id": id_}) @@ -280,7 +280,7 @@ async def run(i): if args.drop: logger.info("dropping test records") - async with await psycopg.AsyncConnection.connect(args.dsn) as conn: + async with await gaussdb.AsyncConnection.connect(args.dsn) as conn: async with conn.cursor() as cursor: await cursor.execute(drop) await conn.commit() @@ -358,9 +358,9 @@ def parse_cmdline() -> Namespace: parser.add_argument( "--dsn", - default=os.environ.get("PSYCOPG_TEST_DSN", ""), + default=os.environ.get("GAUSSDB_TEST_DSN", ""), help="database connection string" - " [default: %(default)r (from PSYCOPG_TEST_DSN env var)]", + " [default: %(default)r (from GAUSSDB_TEST_DSN env var)]", ) parser.add_argument( diff --git a/tests/scripts/copytest.py b/tests/scripts/copytest.py index 7303fa967..2c28e6714 100755 --- a/tests/scripts/copytest.py +++ b/tests/scripts/copytest.py @@ -9,9 +9,9 @@ from typing import Any from argparse import ArgumentParser, Namespace -import psycopg -from psycopg import sql -from psycopg.abc import Query +import gaussdb +from gaussdb import sql +from gaussdb.abc import Query logger = logging.getLogger() logging.basicConfig( @@ -32,9 +32,9 @@ def main(): def main_sync(args: Namespace) -> None: test = CopyPutTest(args) - with psycopg.Connection.connect(args.dsn) as conn: + with gaussdb.Connection.connect(args.dsn) as conn: with conn.cursor() as cur: - writer = getattr(psycopg.copy, args.writer)(cur) if args.writer else None + writer = getattr(gaussdb.copy, 
args.writer)(cur) if args.writer else None cur.execute(test.get_table_stmt()) t0 = time() with cur.copy(test.get_copy_stmt(), writer=writer) as copy: @@ -47,10 +47,10 @@ def main_sync(args: Namespace) -> None: async def main_async(args: Namespace) -> None: test = CopyPutTest(args) - async with await psycopg.AsyncConnection.connect(args.dsn) as conn: + async with await gaussdb.AsyncConnection.connect(args.dsn) as conn: async with conn.cursor() as cur: await cur.execute(test.get_table_stmt()) - writer = getattr(psycopg.copy, args.writer)(cur) if args.writer else None + writer = getattr(gaussdb.copy, args.writer)(cur) if args.writer else None t0 = time() async with cur.copy(test.get_copy_stmt(), writer=writer) as copy: for i in range(args.nrecs): @@ -140,7 +140,7 @@ def parse_cmdline() -> Namespace: if args.writer: try: - getattr(psycopg.copy, args.writer) + getattr(gaussdb.copy, args.writer) except AttributeError: parser.error(f"unknown writer: {args.writer!r}") diff --git a/tests/scripts/dectest.py b/tests/scripts/dectest.py index 27ea6d04c..1d9fc26ac 100644 --- a/tests/scripts/dectest.py +++ b/tests/scripts/dectest.py @@ -5,17 +5,17 @@ from random import randrange from decimal import Decimal -import psycopg -from psycopg import sql +import gaussdb +from gaussdb import sql ncols = 10 nrows = 500000 -format = psycopg.pq.Format.BINARY +format = gaussdb.pq.Format.BINARY test = "copy" def main() -> None: - cnn = psycopg.connect() + cnn = gaussdb.connect() cnn.execute( sql.SQL("create table testdec ({})").format( diff --git a/tests/scripts/pipeline-demo.py b/tests/scripts/pipeline-demo.py index 4808fc1f6..0998f32d1 100644 --- a/tests/scripts/pipeline-demo.py +++ b/tests/scripts/pipeline-demo.py @@ -19,14 +19,14 @@ from collections import deque from collections.abc import Iterator, Sequence -from psycopg import AsyncConnection, Connection -from psycopg import errors as e -from psycopg import pq, waiting -from psycopg.pq import DiagnosticField, Format -from psycopg.abc import PipelineCommand -from psycopg.generators import pipeline_communicate - -psycopg_logger = logging.getLogger("psycopg") +from gaussdb import AsyncConnection, Connection +from gaussdb import errors as e +from gaussdb import pq, waiting +from gaussdb.pq import DiagnosticField, Format +from gaussdb.abc import PipelineCommand +from gaussdb.generators import pipeline_communicate + +psycopg_logger = logging.getLogger("gaussdb") pipeline_logger = logging.getLogger("pipeline") args: argparse.Namespace @@ -291,7 +291,7 @@ def main() -> None: help="number of rows to insert", ) parser.add_argument( - "--pq", action="store_true", help="use low-level psycopg.pq API" + "--pq", action="store_true", help="use low-level gaussdb.pq API" ) parser.add_argument( "--async", dest="async_", action="store_true", help="use async API" @@ -326,7 +326,7 @@ def main() -> None: else: if pq.__impl__ != "python": parser.error( - "only supported for Python implementation (set PSYCOPG_IMPL=python)" + "only supported for Python implementation (set GAUSSDB_IMPL=python)" ) if args.async_: asyncio.run(pipeline_demo_async(args.nrows, args.many, pipeline_logger)) diff --git a/tests/scripts/spiketest.py b/tests/scripts/spiketest.py index 3bc97f0c4..6736c11be 100644 --- a/tests/scripts/spiketest.py +++ b/tests/scripts/spiketest.py @@ -15,9 +15,9 @@ import logging import threading -import psycopg -import psycopg_pool -from psycopg.rows import Row +import gaussdb +import gaussdb_pool +from gaussdb.rows import Row def main() -> None: @@ -28,9 +28,9 @@ def main() -> None: 
level=loglevel, format="%(asctime)s %(levelname)s %(message)s" ) - logging.getLogger("psycopg2.pool").setLevel(loglevel) + logging.getLogger("_GaussDB.pool").setLevel(loglevel) - with psycopg_pool.ConnectionPool( + with gaussdb_pool.ConnectionPool( opt.dsn, min_size=opt.min_size, max_size=opt.max_size, @@ -102,7 +102,7 @@ def _run(self, interval): time.sleep(interval) -class DelayedConnection(psycopg.Connection[Row]): +class DelayedConnection(gaussdb.Connection[Row]): """A connection adding a delay to the connection time.""" @classmethod diff --git a/tests/test_adapt.py b/tests/test_adapt.py index 8115ceb22..04fcaac4d 100644 --- a/tests/test_adapt.py +++ b/tests/test_adapt.py @@ -6,15 +6,15 @@ import pytest -import psycopg -from psycopg import errors as e -from psycopg import postgres, pq, sql -from psycopg.abc import Buffer -from psycopg.adapt import Dumper, Loader, PyFormat, Transformer -from psycopg._cmodule import _psycopg -from psycopg.postgres import types as builtins -from psycopg.types.array import ListBinaryDumper, ListDumper -from psycopg.types.string import StrBinaryDumper, StrDumper +import gaussdb +from gaussdb import errors as e +from gaussdb import postgres, pq, sql +from gaussdb.abc import Buffer +from gaussdb.adapt import Dumper, Loader, PyFormat, Transformer +from gaussdb._cmodule import _gaussdb +from gaussdb.postgres import types as builtins +from gaussdb.types.array import ListBinaryDumper, ListDumper +from gaussdb.types.string import StrBinaryDumper, StrDumper @pytest.mark.parametrize( @@ -59,7 +59,7 @@ def test_quote(data, result): ], ) def test_quote_none(data, result, global_adapters): - psycopg.adapters.register_dumper(str, StrNoneDumper) + gaussdb.adapters.register_dumper(str, StrNoneDumper) t = Transformer() dumper = t.get_dumper(data, PyFormat.TEXT) assert dumper.quote(data) == result @@ -81,8 +81,8 @@ def test_register_dumper_by_class_name(conn): @pytest.mark.crdb("skip", reason="global adapters don't affect crdb") def test_dump_global_ctx(conn_cls, dsn, global_adapters, pgconn): - psycopg.adapters.register_dumper(MyStr, make_bin_dumper("gb")) - psycopg.adapters.register_dumper(MyStr, make_dumper("gt")) + gaussdb.adapters.register_dumper(MyStr, make_bin_dumper("gb")) + gaussdb.adapters.register_dumper(MyStr, make_dumper("gt")) with conn_cls.connect(dsn) as conn: cur = conn.execute("select %s", [MyStr("hello")]) assert cur.fetchone() == ("hellogt",) @@ -172,7 +172,7 @@ def test_loader_protocol(conn): def test_subclass_loader(conn): # This might be a C fast object: make sure that the Python code is called - from psycopg.types.string import TextLoader + from gaussdb.types.string import TextLoader class MyTextLoader(TextLoader): def load(self, data): @@ -216,8 +216,8 @@ def test_register_loader_by_type_name(conn): @pytest.mark.crdb("skip", reason="global adapters don't affect crdb") def test_load_global_ctx(conn_cls, dsn, global_adapters): - psycopg.adapters.register_loader("text", make_loader("gt")) - psycopg.adapters.register_loader("text", make_bin_loader("gb")) + gaussdb.adapters.register_loader("text", make_loader("gt")) + gaussdb.adapters.register_loader("text", make_bin_loader("gb")) with conn_cls.connect(dsn) as conn: cur = conn.cursor(binary=False).execute("select 'hello'::text") assert cur.fetchone() == ("hellogt",) @@ -326,7 +326,7 @@ def test_list_dumper(conn, fmt_out): L: list[list[Any]] = [] L.append(L) - with pytest.raises(psycopg.DataError): + with pytest.raises(gaussdb.DataError): assert t.get_dumper(L, fmt_in) @@ -417,17 +417,17 @@ def 
test_no_cast_needed(conn, fmt_in): @pytest.mark.slow -@pytest.mark.skipif(_psycopg is None, reason="C module test") +@pytest.mark.skipif(_gaussdb is None, reason="C module test") def test_optimised_adapters(): # All the optimised adapters available c_adapters = {} - for n in dir(_psycopg): + for n in dir(_gaussdb): if n.startswith("_") or n in ("CDumper", "CLoader"): continue - obj = getattr(_psycopg, n) + obj = getattr(_gaussdb, n) if not isinstance(obj, type): continue - if not issubclass(obj, (_psycopg.CDumper, _psycopg.CLoader)): + if not issubclass(obj, (_gaussdb.CDumper, _gaussdb.CLoader)): continue c_adapters[n] = obj @@ -448,8 +448,8 @@ def test_optimised_adapters(): assert i >= 10 # Check that every optimised adapter is the optimised version of a Py one - for n in dir(psycopg.types): - mod = getattr(psycopg.types, n) + for n in dir(gaussdb.types): + mod = getattr(gaussdb.types, n) if not isinstance(mod, ModuleType): continue for n1 in dir(mod): diff --git a/tests/test_capabilities.py b/tests/test_capabilities.py index 674a083c2..76f5523ea 100644 --- a/tests/test_capabilities.py +++ b/tests/test_capabilities.py @@ -2,12 +2,12 @@ import pytest -from psycopg import _cmodule, pq +from gaussdb import _cmodule, pq try: - from psycopg import Capabilities, NotSupportedError, capabilities + from gaussdb import Capabilities, NotSupportedError, capabilities except ImportError: - # Allow to import the module with Psycopg 3.1 + # Allow to import the module with gaussdb 3.1 pass caps = [ @@ -67,7 +67,7 @@ def test_impl_build_error(monkeypatch): if pq.__impl__ == "binary": ver = _cmodule.__version__ assert ver - msg = "(imported from the psycopg[binary] package version {ver})" + msg = f"(imported from the gaussdb[binary] package version {ver})" else: msg = "(imported from system libraries)" with pytest.raises(NotSupportedError, match=re.escape(msg)): diff --git a/tests/test_column.py b/tests/test_column.py index 111b632a3..3a3b161a1 100644 --- a/tests/test_column.py +++ b/tests/test_column.py @@ -2,7 +2,7 @@ import pytest -from psycopg.postgres import types as builtins +from gaussdb.postgres import types as builtins from .fix_crdb import crdb_encoding, is_crdb diff --git a/tests/test_concurrency.py b/tests/test_concurrency.py index 146843aa9..abe74ced7 100644 --- a/tests/test_concurrency.py +++ b/tests/test_concurrency.py @@ -13,8 +13,8 @@ import pytest -import psycopg -from psycopg import errors as e +import gaussdb +from gaussdb import errors as e @pytest.mark.slow @@ -38,7 +38,7 @@ def worker(): @pytest.mark.slow def test_commit_concurrency(conn): - # Check the condition reported in psycopg2#103 + # Check the condition reported in _GaussDB#103 # Because of bad status check, we commit even when a commit is already on # its way. We can detect this condition by the warnings. notices = queue.Queue() # type: ignore[var-annotated] @@ -66,14 +66,14 @@ def committer(): @pytest.mark.slow @pytest.mark.subprocess def test_multiprocess_close(dsn, tmpdir): - # Check the problem reported in psycopg2#829 + # Check the problem reported in _GaussDB#829 # Subprocess gcs the copy of the fd after fork so it closes connection.
module = f"""\ import time -import psycopg +import gaussdb def thread(): - conn = psycopg.connect({dsn!r}) + conn = gaussdb.connect({dsn!r}) curs = conn.cursor() for i in range(10): curs.execute("select 1") @@ -179,7 +179,7 @@ def closer(): t.start() t0 = time.time() try: - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): conn.execute("select pg_sleep(1.0)") t1 = time.time() assert 0.2 < t1 - t0 < 0.4 @@ -209,7 +209,7 @@ def test_ctrl_c_handler(dsn): script = f"""\ import os import time -import psycopg +import gaussdb from threading import Thread def tired_of_life(): @@ -219,7 +219,7 @@ def tired_of_life(): t = Thread(target=tired_of_life, daemon=True) t.start() -with psycopg.connect({dsn!r}) as conn: +with gaussdb.connect({dsn!r}) as conn: cur = conn.cursor() ctrl_c = False try: @@ -229,12 +229,12 @@ def tired_of_life(): assert ctrl_c, "ctrl-c not received" assert ( - conn.info.transaction_status == psycopg.pq.TransactionStatus.INERROR + conn.info.transaction_status == gaussdb.pq.TransactionStatus.INERROR ), f"transaction status: {{conn.info.transaction_status!r}}" conn.rollback() assert ( - conn.info.transaction_status == psycopg.pq.TransactionStatus.IDLE + conn.info.transaction_status == gaussdb.pq.TransactionStatus.IDLE ), f"transaction status: {{conn.info.transaction_status!r}}" cur.execute("select 1") @@ -259,9 +259,9 @@ def test_ctrl_c(conn, dsn): APPNAME = "test_ctrl_c" script = f"""\ -import psycopg +import gaussdb -with psycopg.connect({dsn!r}, application_name={APPNAME!r}) as conn: +with gaussdb.connect({dsn!r}, application_name={APPNAME!r}) as conn: conn.execute("select pg_sleep(60)") """ @@ -326,7 +326,7 @@ def test_eintr(dsn, itimername, signame): script = f"""\ import signal -import psycopg +import gaussdb def signal_handler(signum, frame): assert signum == {sig!r} @@ -338,7 +338,7 @@ def signal_handler(signum, frame): signal.siginterrupt({sig!r}, False) -with psycopg.connect({dsn!r}) as conn: +with gaussdb.connect({dsn!r}) as conn: # Fire an interrupt signal every 0.25 seconds signal.setitimer({itimer!r}, 0.25, 0.25) @@ -363,21 +363,21 @@ def signal_handler(signum, frame): reason="problematic behavior only exhibited via fork", ) def test_segfault_on_fork_close(dsn): - # https://github.com/psycopg/psycopg/issues/300 + # https://github.com/gaussdb/gaussdb/issues/300 script = f"""\ import gc -import psycopg +import gaussdb from multiprocessing import Pool def test(arg): - conn1 = psycopg.connect({dsn!r}) + conn1 = gaussdb.connect({dsn!r}) conn1.close() conn1 = None gc.collect() return 1 if __name__ == '__main__': - conn = psycopg.connect({dsn!r}) + conn = gaussdb.connect({dsn!r}) with Pool(2) as p: pool_result = p.map_async(test, [1, 2]) pool_result.wait(timeout=5) @@ -405,14 +405,14 @@ def test_concurrent_close(dsn, conn): def worker(): try: conn.execute("select pg_sleep(3)") - except psycopg.OperationalError: + except gaussdb.OperationalError: pass # expected t0 = time.time() th = threading.Thread(target=worker) th.start() time.sleep(0.5) - with psycopg.connect(dsn, autocommit=True) as conn1: + with gaussdb.connect(dsn, autocommit=True) as conn1: cur = conn1.execute("select query from pg_stat_activity where pid = %s", [pid]) assert cur.fetchone() conn.close() @@ -445,7 +445,7 @@ def worker(unlock, wait_on): if what == "error": 1 / 0 elif what == "rollback": - raise psycopg.Rollback() + raise gaussdb.Rollback() else: assert what == "commit" @@ -454,7 +454,7 @@ def worker(unlock, wait_on): assert isinstance(ex.value.__context__, 
ZeroDivisionError) elif what == "rollback": assert "transaction rollback" in str(ex.value) - assert isinstance(ex.value.__context__, psycopg.Rollback) + assert isinstance(ex.value.__context__, gaussdb.Rollback) else: assert "transaction commit" in str(ex.value) diff --git a/tests/test_concurrency_async.py b/tests/test_concurrency_async.py index 6537f9a58..bb2b0acbf 100644 --- a/tests/test_concurrency_async.py +++ b/tests/test_concurrency_async.py @@ -11,13 +11,13 @@ import pytest -import psycopg -from psycopg import errors as e +import gaussdb +from gaussdb import errors as e @pytest.mark.slow async def test_commit_concurrency(aconn): - # Check the condition reported in psycopg2#103 + # Check the condition reported in _GaussDB#103 # Because of bad status check, we commit even when a commit is already on # its way. We can detect this condition by the warnings. notices = Queue() # type: ignore[var-annotated] @@ -132,7 +132,7 @@ async def closer(): t = create_task(closer()) t0 = time.time() try: - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): await aconn.execute("select pg_sleep(1.0)") t1 = time.time() assert 0.2 < t1 - t0 < 0.4 @@ -154,27 +154,27 @@ def test_ctrl_c_handler(dsn): script = f"""\ import signal import asyncio -import psycopg +import gaussdb async def main(): ctrl_c = False loop = asyncio.get_event_loop() - async with await psycopg.AsyncConnection.connect({dsn!r}) as conn: + async with await gaussdb.AsyncConnection.connect({dsn!r}) as conn: loop.add_signal_handler(signal.SIGINT, conn.cancel) cur = conn.cursor() try: await cur.execute("select pg_sleep(2)") - except psycopg.errors.QueryCanceled: + except gaussdb.errors.QueryCanceled: ctrl_c = True assert ctrl_c, "ctrl-c not received" assert ( - conn.info.transaction_status == psycopg.pq.TransactionStatus.INERROR + conn.info.transaction_status == gaussdb.pq.TransactionStatus.INERROR ), f"transaction status: {{conn.info.transaction_status!r}}" await conn.rollback() assert ( - conn.info.transaction_status == psycopg.pq.TransactionStatus.IDLE + conn.info.transaction_status == gaussdb.pq.TransactionStatus.IDLE ), f"transaction status: {{conn.info.transaction_status!r}}" await cur.execute("select 1") @@ -205,16 +205,16 @@ async def main(): ) @pytest.mark.crdb("skip") def test_ctrl_c(conn, dsn): - # https://github.com/psycopg/psycopg/issues/543 + # https://github.com/gaussdb/gaussdb/issues/543 conn.autocommit = True APPNAME = "test_ctrl_c" script = f"""\ import asyncio -import psycopg +import gaussdb async def main(): - async with await psycopg.AsyncConnection.connect( + async with await gaussdb.AsyncConnection.connect( {dsn!r}, application_name={APPNAME!r} ) as conn: await conn.execute("select pg_sleep(5)") @@ -284,7 +284,7 @@ def test_eintr(dsn, itimername, signame): script = f"""\ import signal import asyncio -import psycopg +import gaussdb def signal_handler(signum, frame): assert signum == {sig!r} @@ -297,7 +297,7 @@ def signal_handler(signum, frame): async def main(): - async with await psycopg.AsyncConnection.connect({dsn!r}) as conn: + async with await gaussdb.AsyncConnection.connect({dsn!r}) as conn: # Fire an interrupt signal every 0.25 seconds signal.setitimer({itimer!r}, 0.25, 0.25) @@ -332,7 +332,7 @@ async def test_concurrent_close(dsn, aconn): async def worker(): try: await aconn.execute("select pg_sleep(3)") - except psycopg.OperationalError: + except gaussdb.OperationalError: pass # expected t0 = time.time() @@ -340,7 +340,7 @@ async def worker(): await asyncio.sleep(0.5) 
async def test(): - async with await psycopg.AsyncConnection.connect(dsn, autocommit=True) as conn1: + async with await gaussdb.AsyncConnection.connect(dsn, autocommit=True) as conn1: cur = await conn1.execute( "select query from pg_stat_activity where pid = %s", [pid] ) @@ -377,7 +377,7 @@ async def worker(unlock, wait_on): if what == "error": 1 / 0 elif what == "rollback": - raise psycopg.Rollback() + raise gaussdb.Rollback() else: assert what == "commit" @@ -386,7 +386,7 @@ async def worker(unlock, wait_on): assert isinstance(ex.value.__context__, ZeroDivisionError) elif what == "rollback": assert "transaction rollback" in str(ex.value) - assert isinstance(ex.value.__context__, psycopg.Rollback) + assert isinstance(ex.value.__context__, gaussdb.Rollback) else: assert "transaction commit" in str(ex.value) diff --git a/tests/test_connection.py b/tests/test_connection.py index e1520a3b0..5874647dd 100644 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -11,11 +11,11 @@ import pytest -import psycopg -from psycopg import errors as e -from psycopg import pq -from psycopg.rows import tuple_row -from psycopg.conninfo import conninfo_to_dict, timeout_from_conninfo +import gaussdb +from gaussdb import errors as e +from gaussdb import pq +from gaussdb.rows import tuple_row +from gaussdb.conninfo import conninfo_to_dict, timeout_from_conninfo from .acompat import is_async, skip_async, skip_sync, sleep from .test_adapt import make_bin_dumper, make_dumper @@ -33,7 +33,7 @@ def test_connect(conn_cls, dsn): def test_connect_bad(conn_cls): - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): conn_cls.connect("dbname=nosuchdb") @@ -53,7 +53,7 @@ class MyString(str): def test_connect_timeout(conn_cls, proxy): with proxy.deaf_listen(): t0 = time.time() - with pytest.raises(psycopg.OperationalError, match="timeout expired"): + with pytest.raises(gaussdb.OperationalError, match="timeout expired"): conn_cls.connect(proxy.client_dsn, connect_timeout=2) elapsed = time.time() - t0 assert elapsed == pytest.approx(2.0, 0.1) @@ -66,7 +66,7 @@ def test_multi_hosts(conn_cls, proxy, dsn, monkeypatch): args["host"] = f"{proxy.client_host},{proxy.server_host}" args["port"] = f"{proxy.client_port},{proxy.server_port}" args.pop("hostaddr", None) - monkeypatch.setattr(psycopg.conninfo, "_DEFAULT_CONNECT_TIMEOUT", 2) + monkeypatch.setattr(gaussdb.conninfo, "_DEFAULT_CONNECT_TIMEOUT", 2) with proxy.deaf_listen(): t0 = time.time() with conn_cls.connect(**args) as conn: @@ -108,13 +108,13 @@ def test_close(conn): assert conn.closed assert conn.pgconn.status == pq.ConnStatus.BAD - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): cur.execute("select 1") @pytest.mark.crdb_skip("pg_terminate_backend") def test_broken(conn): - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): conn.execute("select pg_terminate_backend(%s)", [conn.pgconn.backend_pid]) assert conn.closed assert conn.broken @@ -125,12 +125,12 @@ def test_broken(conn): def test_cursor_closed(conn): conn.close() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): with conn.cursor("foo"): pass - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): conn.cursor("foo") - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): conn.cursor() @@ -165,7 +165,7 @@ def test_connection_warn_close(conn_cls, dsn, 
recwarn, gc_collect): conn = conn_cls.connect(dsn) try: conn.execute("select wat") - except psycopg.ProgrammingError: + except gaussdb.ProgrammingError: pass del conn gc_collect() @@ -217,7 +217,7 @@ def test_context_close(conn): @pytest.mark.crdb_skip("pg_terminate_backend") def test_context_inerror_rollback_no_clobber(conn_cls, conn, dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg") + caplog.set_level(logging.WARNING, logger="gaussdb") with pytest.raises(ZeroDivisionError): with conn_cls.connect(dsn) as conn2: @@ -235,7 +235,7 @@ def test_context_inerror_rollback_no_clobber(conn_cls, conn, dsn, caplog): @pytest.mark.crdb_skip("copy") def test_context_active_rollback_no_clobber(conn_cls, dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg") + caplog.set_level(logging.WARNING, logger="gaussdb") with pytest.raises(ZeroDivisionError): with conn_cls.connect(dsn) as conn: @@ -273,7 +273,7 @@ def test_commit(conn): assert res.get_value(0, 0) == b"1" conn.close() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): conn.commit() @@ -309,7 +309,7 @@ def test_rollback(conn): assert res.ntuples == 0 conn.close() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): conn.rollback() @@ -340,11 +340,11 @@ def test_auto_transaction_fail(conn): cur.execute("insert into foo values (1)") assert conn.pgconn.transaction_status == pq.TransactionStatus.INTRANS - with pytest.raises(psycopg.DatabaseError): + with pytest.raises(gaussdb.DatabaseError): cur.execute("meh") assert conn.pgconn.transaction_status == pq.TransactionStatus.INERROR - with pytest.raises(psycopg.errors.InFailedSqlTransaction): + with pytest.raises(gaussdb.errors.InFailedSqlTransaction): cur.execute("select 1") conn.commit() @@ -410,17 +410,17 @@ def test_autocommit_intrans(conn): cur.execute("select 1") assert cur.fetchone() == (1,) assert conn.pgconn.transaction_status == pq.TransactionStatus.INTRANS - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): conn.set_autocommit(True) assert not conn.autocommit def test_autocommit_inerror(conn): cur = conn.cursor() - with pytest.raises(psycopg.DatabaseError): + with pytest.raises(gaussdb.DatabaseError): cur.execute("meh") assert conn.pgconn.transaction_status == pq.TransactionStatus.INERROR - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): conn.set_autocommit(True) assert not conn.autocommit @@ -428,7 +428,7 @@ def test_autocommit_inerror(conn): def test_autocommit_unknown(conn): conn.close() assert conn.pgconn.transaction_status == pq.TransactionStatus.UNKNOWN - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): conn.set_autocommit(True) assert not conn.autocommit @@ -460,7 +460,7 @@ def fake_connect(conninfo, *, timeout=0.0): yield setpgenv({}) - monkeypatch.setattr(psycopg.generators, "connect", fake_connect) + monkeypatch.setattr(gaussdb.generators, "connect", fake_connect) conn = conn_cls.connect(*args, **kwargs) assert conninfo_to_dict(got_conninfo) == conninfo_to_dict(want) conn.close() @@ -471,7 +471,7 @@ def fake_connect(conninfo, *, timeout=0.0): [ (("host=foo", "host=bar"), {}, TypeError), (("", ""), {}, TypeError), - ((), {"nosuchparam": 42}, psycopg.ProgrammingError), + ((), {"nosuchparam": 42}, gaussdb.ProgrammingError), ], ) def test_connect_badargs(conn_cls, monkeypatch, pgconn, args, kwargs, exctype): @@ -482,14 +482,14 @@ def 
test_connect_badargs(conn_cls, monkeypatch, pgconn, args, kwargs, exctype):
 
 
 @pytest.mark.crdb_skip("pg_terminate_backend")
 def test_broken_connection(conn):
     cur = conn.cursor()
-    with pytest.raises(psycopg.DatabaseError):
+    with pytest.raises(gaussdb.DatabaseError):
         cur.execute("select pg_terminate_backend(pg_backend_pid())")
     assert conn.closed
 
 
 @pytest.mark.crdb_skip("do")
 def test_notice_handlers(conn, caplog):
-    caplog.set_level(logging.WARNING, logger="psycopg")
+    caplog.set_level(logging.WARNING, logger="gaussdb")
     messages = []
     severities = []
 
@@ -585,14 +585,14 @@ def test_str(conn):
 def test_fileno(conn):
     assert conn.fileno() == conn.pgconn.socket
     conn.close()
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         conn.fileno()
 
 
 def test_cursor_factory(conn):
-    assert conn.cursor_factory is psycopg.Cursor
+    assert conn.cursor_factory is gaussdb.Cursor
 
-    class MyCursor(psycopg.Cursor[psycopg.rows.Row]):
+    class MyCursor(gaussdb.Cursor[gaussdb.rows.Row]):
         pass
 
     conn.cursor_factory = MyCursor
@@ -605,7 +605,7 @@ class MyCursor(psycopg.Cursor[psycopg.rows.Row]):
 def test_cursor_factory_connect(conn_cls, dsn):
-    class MyCursor(psycopg.Cursor[psycopg.rows.Row]):
+    class MyCursor(gaussdb.Cursor[gaussdb.rows.Row]):
         pass
 
     with conn_cls.connect(dsn, cursor_factory=MyCursor) as conn:
@@ -615,9 +615,9 @@ class MyCursor(psycopg.Cursor[psycopg.rows.Row]):
 def test_server_cursor_factory(conn):
-    assert conn.server_cursor_factory is psycopg.ServerCursor
+    assert conn.server_cursor_factory is gaussdb.ServerCursor
 
-    class MyServerCursor(psycopg.ServerCursor[psycopg.rows.Row]):
+    class MyServerCursor(gaussdb.ServerCursor[gaussdb.rows.Row]):
         pass
 
     conn.server_cursor_factory = MyServerCursor
@@ -648,7 +648,7 @@ def test_transaction_param_readonly_property(conn, param):
 def test_set_transaction_param_implicit(conn, param, autocommit):
     conn.set_autocommit(autocommit)
     for value in param.values:
-        if value == psycopg.IsolationLevel.SERIALIZABLE:
+        if value == gaussdb.IsolationLevel.SERIALIZABLE:
             pytest.skip(
                 "GaussDB currently does not support SERIALIZABLE, \
                 which is equivalent to REPEATABLE READ"
             )
@@ -675,7 +675,7 @@ def test_set_transaction_param_reset(conn, param):
     conn.commit()
 
     for value in param.values:
-        if value == psycopg.IsolationLevel.SERIALIZABLE:
+        if value == gaussdb.IsolationLevel.SERIALIZABLE:
             pytest.skip(
                 "GaussDB currently does not support SERIALIZABLE, \
                 which is equivalent to REPEATABLE READ"
             )
@@ -698,7 +698,7 @@ def test_set_transaction_param_block(conn, param, autocommit):
     conn.set_autocommit(autocommit)
     for value in param.values:
-        if value == psycopg.IsolationLevel.SERIALIZABLE:
+        if value == gaussdb.IsolationLevel.SERIALIZABLE:
             pytest.skip(
                 "GaussDB currently does not support SERIALIZABLE, \
                 which is equivalent to REPEATABLE READ"
             )
@@ -716,7 +716,7 @@ def test_set_transaction_param_not_intrans_implicit(conn, param):
     conn.execute("select 1")
     value = param.values[0]
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         getattr(conn, f"set_{param.name}")(value)
 
 
@@ -724,7 +724,7 @@ def test_set_transaction_param_not_intrans_block(conn, param):
     value = param.values[0]
     with conn.transaction():
-        with pytest.raises(psycopg.ProgrammingError):
+        with pytest.raises(gaussdb.ProgrammingError):
             getattr(conn, f"set_{param.name}")(value)
 
 
@@ -733,7 +733,7 @@ def test_set_transaction_param_not_intrans_external(conn, param):
     value = param.values[0]
     conn.set_autocommit(True)
     conn.execute("begin")
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         getattr(conn, f"set_{param.name}")(value)
@@ -773,8 +773,8 @@ def test_set_transaction_param_strange(conn):
         with pytest.raises(ValueError):
             conn.set_isolation_level(val)
 
-    conn.set_isolation_level(psycopg.IsolationLevel.SERIALIZABLE.value)
-    assert conn.isolation_level is psycopg.IsolationLevel.SERIALIZABLE
+    conn.set_isolation_level(gaussdb.IsolationLevel.SERIALIZABLE.value)
+    assert conn.isolation_level is gaussdb.IsolationLevel.SERIALIZABLE
 
     conn.set_read_only(1)
     assert conn.read_only is True
@@ -789,8 +789,8 @@ def test_set_transaction_param_strange_property(conn):
         with pytest.raises(ValueError):
             conn.isolation_level = val
 
-    conn.isolation_level = psycopg.IsolationLevel.SERIALIZABLE.value
-    assert conn.isolation_level is psycopg.IsolationLevel.SERIALIZABLE
+    conn.isolation_level = gaussdb.IsolationLevel.SERIALIZABLE.value
+    assert conn.isolation_level is gaussdb.IsolationLevel.SERIALIZABLE
 
     conn.read_only = 1
     assert conn.read_only is True
@@ -808,7 +808,7 @@ def test_get_connection_params(conn_cls, dsn, kwargs, exp, setpgenv):
 def test_connect_context_adapters(conn_cls, dsn):
-    ctx = psycopg.adapt.AdaptersMap(psycopg.adapters)
+    ctx = gaussdb.adapt.AdaptersMap(gaussdb.adapters)
 
     ctx.register_dumper(str, make_bin_dumper("b"))
     ctx.register_dumper(str, make_dumper("t"))
@@ -847,7 +847,7 @@ def test_cancel_safe_closed(conn):
 @pytest.mark.slow
 @pytest.mark.timing
 def test_cancel_safe_error(conn_cls, proxy, caplog):
-    caplog.set_level(logging.WARNING, logger="psycopg")
+    caplog.set_level(logging.WARNING, logger="gaussdb")
     proxy.start()
     with conn_cls.connect(proxy.client_dsn) as conn:
         proxy.stop()
@@ -905,12 +905,12 @@ def test_right_exception_on_server_disconnect(conn):
 @pytest.mark.gaussdb_skip("error result not returned")
 @pytest.mark.opengauss_skip("error result not returned")
 def test_right_exception_on_session_timeout(conn):
-    want_ex: type[psycopg.Error] = e.IdleInTransactionSessionTimeout
+    want_ex: type[gaussdb.Error] = e.IdleInTransactionSessionTimeout
     if sys.platform == "win32":
         # No idea why this is needed and `test_right_exception_on_server_disconnect`
         # works instead. Maybe the difference lies in the server we are testing
         # with, not in the client.
-        want_ex = psycopg.OperationalError
+        want_ex = gaussdb.OperationalError
 
     conn.execute("SET SESSION idle_in_transaction_timeout = 100")
     sleep(0.2)
@@ -936,5 +936,5 @@ def test_connect_tsa(conn_cls, dsn, mode):
 def test_connect_tsa_bad(conn_cls, dsn, mode):
     # NOTE: assume that the test database is a "primary"
     params = conninfo_to_dict(dsn, target_session_attrs=mode)
-    with pytest.raises(psycopg.OperationalError, match=mode):
+    with pytest.raises(gaussdb.OperationalError, match=mode):
         conn_cls.connect(**params)
diff --git a/tests/test_connection_async.py b/tests/test_connection_async.py
index 271b7a0a2..51675d83f 100644
--- a/tests/test_connection_async.py
+++ b/tests/test_connection_async.py
@@ -8,11 +8,11 @@
 
 import pytest
 
-import psycopg
-from psycopg import errors as e
-from psycopg import pq
-from psycopg.rows import tuple_row
-from psycopg.conninfo import conninfo_to_dict, timeout_from_conninfo
+import gaussdb
+from gaussdb import errors as e
+from gaussdb import pq
+from gaussdb.rows import tuple_row
+from gaussdb.conninfo import conninfo_to_dict, timeout_from_conninfo
 
 from .acompat import asleep, is_async, skip_async, skip_sync
 from .test_adapt import make_bin_dumper, make_dumper
@@ -30,7 +30,7 @@ async def test_connect(aconn_cls, dsn):
 
 async def test_connect_bad(aconn_cls):
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         await aconn_cls.connect("dbname=nosuchdb")
@@ -49,7 +49,7 @@ class MyString(str):
 async def test_connect_timeout(aconn_cls, proxy):
     with proxy.deaf_listen():
         t0 = time.time()
-        with pytest.raises(psycopg.OperationalError, match="timeout expired"):
+        with pytest.raises(gaussdb.OperationalError, match="timeout expired"):
             await aconn_cls.connect(proxy.client_dsn, connect_timeout=2)
     elapsed = time.time() - t0
     assert elapsed == pytest.approx(2.0, 0.1)
@@ -62,7 +62,7 @@ async def test_multi_hosts(aconn_cls, proxy, dsn, monkeypatch):
     args["host"] = f"{proxy.client_host},{proxy.server_host}"
     args["port"] = f"{proxy.client_port},{proxy.server_port}"
     args.pop("hostaddr", None)
-    monkeypatch.setattr(psycopg.conninfo, "_DEFAULT_CONNECT_TIMEOUT", 2)
+    monkeypatch.setattr(gaussdb.conninfo, "_DEFAULT_CONNECT_TIMEOUT", 2)
     with proxy.deaf_listen():
         t0 = time.time()
         async with await aconn_cls.connect(**args) as conn:
@@ -104,13 +104,13 @@ async def test_close(aconn):
     assert aconn.closed
     assert aconn.pgconn.status == pq.ConnStatus.BAD
 
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         await cur.execute("select 1")
 
 
 @pytest.mark.crdb_skip("pg_terminate_backend")
 async def test_broken(aconn):
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         await aconn.execute(
             "select pg_terminate_backend(%s)", [aconn.pgconn.backend_pid]
         )
@@ -123,12 +123,12 @@ async def test_broken(aconn):
 async def test_cursor_closed(aconn):
     await aconn.close()
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         async with aconn.cursor("foo"):
             pass
 
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         aconn.cursor("foo")
 
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         aconn.cursor()
@@ -161,7 +161,7 @@ async def test_connection_warn_close(aconn_cls, dsn, recwarn, gc_collect):
     conn = await aconn_cls.connect(dsn)
     try:
         await conn.execute("select wat")
-    except psycopg.ProgrammingError:
+    except gaussdb.ProgrammingError:
         pass
     del conn
     gc_collect()
@@ -213,7 +213,7 @@ async def test_context_close(aconn):
 
 @pytest.mark.crdb_skip("pg_terminate_backend")
 async def test_context_inerror_rollback_no_clobber(aconn_cls, conn, dsn, caplog):
-    caplog.set_level(logging.WARNING, logger="psycopg")
+    caplog.set_level(logging.WARNING, logger="gaussdb")
 
     with pytest.raises(ZeroDivisionError):
         async with await aconn_cls.connect(dsn) as conn2:
@@ -232,7 +232,7 @@ async def test_context_inerror_rollback_no_clobber(aconn_cls, conn, dsn, caplog)
 @pytest.mark.crdb_skip("copy")
 async def test_context_active_rollback_no_clobber(aconn_cls, dsn, caplog):
-    caplog.set_level(logging.WARNING, logger="psycopg")
+    caplog.set_level(logging.WARNING, logger="gaussdb")
 
     with pytest.raises(ZeroDivisionError):
         async with await aconn_cls.connect(dsn) as conn:
@@ -270,7 +270,7 @@ async def test_commit(aconn):
     assert res.get_value(0, 0) == b"1"
 
     await aconn.close()
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         await aconn.commit()
@@ -306,7 +306,7 @@ async def test_rollback(aconn):
     assert res.ntuples == 0
 
     await aconn.close()
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         await aconn.rollback()
@@ -337,11 +337,11 @@ async def test_auto_transaction_fail(aconn):
     await cur.execute("insert into foo values (1)")
     assert aconn.pgconn.transaction_status == pq.TransactionStatus.INTRANS
 
-    with pytest.raises(psycopg.DatabaseError):
+    with pytest.raises(gaussdb.DatabaseError):
         await cur.execute("meh")
     assert aconn.pgconn.transaction_status == pq.TransactionStatus.INERROR
 
-    with pytest.raises(psycopg.errors.InFailedSqlTransaction):
+    with pytest.raises(gaussdb.errors.InFailedSqlTransaction):
         await cur.execute("select 1")
 
     await aconn.commit()
@@ -407,17 +407,17 @@ async def test_autocommit_intrans(aconn):
         await cur.execute("select 1")
         assert await cur.fetchone() == (1,)
         assert aconn.pgconn.transaction_status == pq.TransactionStatus.INTRANS
-        with pytest.raises(psycopg.ProgrammingError):
+        with pytest.raises(gaussdb.ProgrammingError):
             await aconn.set_autocommit(True)
     assert not aconn.autocommit
 
 
 async def test_autocommit_inerror(aconn):
     cur = aconn.cursor()
-    with pytest.raises(psycopg.DatabaseError):
+    with pytest.raises(gaussdb.DatabaseError):
         await cur.execute("meh")
     assert aconn.pgconn.transaction_status == pq.TransactionStatus.INERROR
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         await aconn.set_autocommit(True)
     assert not aconn.autocommit
@@ -425,7 +425,7 @@ async def test_autocommit_inerror(aconn):
 async def test_autocommit_unknown(aconn):
     await aconn.close()
     assert aconn.pgconn.transaction_status == pq.TransactionStatus.UNKNOWN
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         await aconn.set_autocommit(True)
     assert not aconn.autocommit
@@ -457,7 +457,7 @@ def fake_connect(conninfo, *, timeout=0.0):
         yield
 
     setpgenv({})
-    monkeypatch.setattr(psycopg.generators, "connect", fake_connect)
+    monkeypatch.setattr(gaussdb.generators, "connect", fake_connect)
     conn = await aconn_cls.connect(*args, **kwargs)
     assert conninfo_to_dict(got_conninfo) == conninfo_to_dict(want)
     await conn.close()
@@ -468,7 +468,7 @@
     [
         (("host=foo", "host=bar"), {}, TypeError),
         (("", ""), {}, TypeError),
-        ((), {"nosuchparam": 42}, psycopg.ProgrammingError),
+        ((), {"nosuchparam": 42}, gaussdb.ProgrammingError),
     ],
 )
 async def test_connect_badargs(aconn_cls, monkeypatch, pgconn, args, kwargs, exctype):
@@ -479,14 +479,14 @@ async def test_connect_badargs(aconn_cls, monkeypatch, pgconn, args, kwargs, exc
 @pytest.mark.crdb_skip("pg_terminate_backend")
 async def test_broken_connection(aconn):
     cur = aconn.cursor()
-    with pytest.raises(psycopg.DatabaseError):
+    with pytest.raises(gaussdb.DatabaseError):
         await cur.execute("select pg_terminate_backend(pg_backend_pid())")
     assert aconn.closed
 
 
 @pytest.mark.crdb_skip("do")
 async def test_notice_handlers(aconn, caplog):
-    caplog.set_level(logging.WARNING, logger="psycopg")
+    caplog.set_level(logging.WARNING, logger="gaussdb")
     messages = []
     severities = []
@@ -584,14 +584,14 @@ async def test_str(aconn):
 async def test_fileno(aconn):
     assert aconn.fileno() == aconn.pgconn.socket
     await aconn.close()
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         aconn.fileno()
 
 
 async def test_cursor_factory(aconn):
-    assert aconn.cursor_factory is psycopg.AsyncCursor
+    assert aconn.cursor_factory is gaussdb.AsyncCursor
 
-    class MyCursor(psycopg.AsyncCursor[psycopg.rows.Row]):
+    class MyCursor(gaussdb.AsyncCursor[gaussdb.rows.Row]):
         pass
 
     aconn.cursor_factory = MyCursor
@@ -603,7 +603,7 @@ class MyCursor(psycopg.AsyncCursor[psycopg.rows.Row]):
 async def test_cursor_factory_connect(aconn_cls, dsn):
-    class MyCursor(psycopg.AsyncCursor[psycopg.rows.Row]):
+    class MyCursor(gaussdb.AsyncCursor[gaussdb.rows.Row]):
         pass
 
     async with await aconn_cls.connect(dsn, cursor_factory=MyCursor) as conn:
@@ -613,9 +613,9 @@ class MyCursor(psycopg.AsyncCursor[psycopg.rows.Row]):
 async def test_server_cursor_factory(aconn):
-    assert aconn.server_cursor_factory is psycopg.AsyncServerCursor
+    assert aconn.server_cursor_factory is gaussdb.AsyncServerCursor
 
-    class MyServerCursor(psycopg.AsyncServerCursor[psycopg.rows.Row]):
+    class MyServerCursor(gaussdb.AsyncServerCursor[gaussdb.rows.Row]):
         pass
 
     aconn.server_cursor_factory = MyServerCursor
@@ -646,7 +646,7 @@ async def test_transaction_param_readonly_property(aconn, param):
 async def test_set_transaction_param_implicit(aconn, param, autocommit):
     await aconn.set_autocommit(autocommit)
     for value in param.values:
-        if value == psycopg.IsolationLevel.SERIALIZABLE:
+        if value == gaussdb.IsolationLevel.SERIALIZABLE:
             pytest.skip(
                 "GaussDB currently does not support SERIALIZABLE, \
                 which is equivalent to REPEATABLE READ"
             )
@@ -673,7 +673,7 @@ async def test_set_transaction_param_reset(aconn, param):
     await aconn.commit()
 
     for value in param.values:
-        if value == psycopg.IsolationLevel.SERIALIZABLE:
+        if value == gaussdb.IsolationLevel.SERIALIZABLE:
             pytest.skip(
                 "GaussDB currently does not support SERIALIZABLE, \
                 which is equivalent to REPEATABLE READ"
             )
@@ -700,7 +700,7 @@ async def test_set_transaction_param_block(aconn, param, autocommit):
     await aconn.set_autocommit(autocommit)
     for value in param.values:
-        if value == psycopg.IsolationLevel.SERIALIZABLE:
+        if value == gaussdb.IsolationLevel.SERIALIZABLE:
             pytest.skip(
                 "GaussDB currently does not support SERIALIZABLE, \
                 which is equivalent to REPEATABLE READ"
             )
@@ -718,7 +718,7 @@ async def test_set_transaction_param_not_intrans_implicit(aconn, param):
     await aconn.execute("select 1")
     value = param.values[0]
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         await getattr(aconn, f"set_{param.name}")(value)
 
 
@@ -726,7 +726,7 @@ async def test_set_transaction_param_not_intrans_implicit(aconn, param):
 async def test_set_transaction_param_not_intrans_block(aconn, param):
     value = param.values[0]
     async with aconn.transaction():
-        with pytest.raises(psycopg.ProgrammingError):
+        with pytest.raises(gaussdb.ProgrammingError):
             await getattr(aconn, f"set_{param.name}")(value)
 
 
@@ -735,7 +735,7 @@ async def test_set_transaction_param_not_intrans_external(aconn, param):
     value = param.values[0]
     await aconn.set_autocommit(True)
     await aconn.execute("begin")
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         await getattr(aconn, f"set_{param.name}")(value)
@@ -777,8 +777,8 @@ async def test_set_transaction_param_strange(aconn):
         with pytest.raises(ValueError):
             await aconn.set_isolation_level(val)
 
-    await aconn.set_isolation_level(psycopg.IsolationLevel.SERIALIZABLE.value)
-    assert aconn.isolation_level is psycopg.IsolationLevel.SERIALIZABLE
+    await aconn.set_isolation_level(gaussdb.IsolationLevel.SERIALIZABLE.value)
+    assert aconn.isolation_level is gaussdb.IsolationLevel.SERIALIZABLE
 
     await aconn.set_read_only(1)
     assert aconn.read_only is True
@@ -793,8 +793,8 @@ def test_set_transaction_param_strange_property(conn):
         with pytest.raises(ValueError):
             conn.isolation_level = val
 
-    conn.isolation_level = psycopg.IsolationLevel.SERIALIZABLE.value
-    assert conn.isolation_level is psycopg.IsolationLevel.SERIALIZABLE
+    conn.isolation_level = gaussdb.IsolationLevel.SERIALIZABLE.value
+    assert conn.isolation_level is gaussdb.IsolationLevel.SERIALIZABLE
 
     conn.read_only = 1
     assert conn.read_only is True
@@ -812,7 +812,7 @@ async def test_get_connection_params(aconn_cls, dsn, kwargs, exp, setpgenv):
 async def test_connect_context_adapters(aconn_cls, dsn):
-    ctx = psycopg.adapt.AdaptersMap(psycopg.adapters)
+    ctx = gaussdb.adapt.AdaptersMap(gaussdb.adapters)
 
     ctx.register_dumper(str, make_bin_dumper("b"))
     ctx.register_dumper(str, make_dumper("t"))
@@ -851,7 +851,7 @@ async def test_cancel_safe_closed(aconn):
 @pytest.mark.slow
 @pytest.mark.timing
 async def test_cancel_safe_error(aconn_cls, proxy, caplog):
-    caplog.set_level(logging.WARNING, logger="psycopg")
+    caplog.set_level(logging.WARNING, logger="gaussdb")
     proxy.start()
     async with await aconn_cls.connect(proxy.client_dsn) as aconn:
         proxy.stop()
@@ -911,12 +911,12 @@ async def test_right_exception_on_server_disconnect(aconn):
 @pytest.mark.gaussdb_skip("error result not returned")
 @pytest.mark.opengauss_skip("error result not returned")
 async def test_right_exception_on_session_timeout(aconn):
-    want_ex: type[psycopg.Error] = e.IdleInTransactionSessionTimeout
+    want_ex: type[gaussdb.Error] = e.IdleInTransactionSessionTimeout
     if sys.platform == "win32":
         # No idea why this is needed and `test_right_exception_on_server_disconnect`
         # works instead. Maybe the difference lies in the server we are testing
         # with, not in the client.
-        want_ex = psycopg.OperationalError
+        want_ex = gaussdb.OperationalError
 
     await aconn.execute("SET SESSION idle_in_transaction_timeout = 100")
     await asleep(0.2)
@@ -942,5 +942,5 @@ async def test_connect_tsa(aconn_cls, dsn, mode):
 async def test_connect_tsa_bad(aconn_cls, dsn, mode):
     # NOTE: assume that the test database is a "primary"
     params = conninfo_to_dict(dsn, target_session_attrs=mode)
-    with pytest.raises(psycopg.OperationalError, match=mode):
+    with pytest.raises(gaussdb.OperationalError, match=mode):
         await aconn_cls.connect(**params)
diff --git a/tests/test_connection_info.py b/tests/test_connection_info.py
index c6b3b3e6b..575ca7249 100644
--- a/tests/test_connection_info.py
+++ b/tests/test_connection_info.py
@@ -2,9 +2,9 @@
 
 import pytest
 
-import psycopg
-from psycopg.conninfo import conninfo_to_dict, make_conninfo
-from psycopg._encodings import pg2pyenc
+import gaussdb
+from gaussdb.conninfo import conninfo_to_dict, make_conninfo
+from gaussdb._encodings import pg2pyenc
 
 from .fix_crdb import crdb_encoding
@@ -19,7 +19,7 @@ def test_attrs(conn, attr):
     else:
         info_attr = pgconn_attr = attr
 
-    if info_attr == "hostaddr" and psycopg.pq.version() < 120000:
+    if info_attr == "hostaddr" and gaussdb.pq.version() < 120000:
         pytest.skip("hostaddr not supported on libpq < 12")
 
     info_val = getattr(conn.info, info_attr)
@@ -27,20 +27,20 @@
     assert info_val == pgconn_val
 
     conn.close()
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         getattr(conn.info, info_attr)
 
 
 @pytest.mark.libpq("< 12")
 def test_hostaddr_not_supported(conn):
-    with pytest.raises(psycopg.NotSupportedError):
+    with pytest.raises(gaussdb.NotSupportedError):
         conn.info.hostaddr
 
 
 def test_port(conn):
     assert conn.info.port == int(conn.pgconn.port.decode())
     conn.close()
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         conn.info.port
@@ -126,8 +126,8 @@ def test_pipeline_status_no_pipeline(conn):
 @pytest.mark.opengauss_skip("This method PGconn.info is not implemented in openGauss")
 def test_no_password(dsn):
     dsn2 = make_conninfo(dsn, password="the-pass-word")
-    pgconn = psycopg.pq.PGconn.connect_start(dsn2.encode())
-    info = psycopg.ConnectionInfo(pgconn)
+    pgconn = gaussdb.pq.PGconn.connect_start(dsn2.encode())
+    info = gaussdb.ConnectionInfo(pgconn)
     assert info.password == "the-pass-word"
     assert "password" not in info.get_parameters()
     assert info.get_parameters()["dbname"] == info.dbname
@@ -137,8 +137,8 @@ def test_no_password(dsn):
 @pytest.mark.opengauss_skip("This method PGconn.info is not implemented in openGauss")
 def test_dsn_no_password(dsn):
     dsn2 = make_conninfo(dsn, password="the-pass-word")
-    pgconn = psycopg.pq.PGconn.connect_start(dsn2.encode())
-    info = psycopg.ConnectionInfo(pgconn)
+    pgconn = gaussdb.pq.PGconn.connect_start(dsn2.encode())
+    info = gaussdb.ConnectionInfo(pgconn)
     assert info.password == "the-pass-word"
     assert "password" not in info.dsn
     assert f"dbname={info.dbname}" in info.dsn
@@ -158,7 +158,7 @@ def test_server_version(conn):
 def test_error_message(conn):
     assert conn.info.error_message == ""
 
-    with pytest.raises(psycopg.ProgrammingError) as ex:
+    with pytest.raises(gaussdb.ProgrammingError) as ex:
         conn.execute("wat")
 
     assert conn.info.error_message
@@ -174,7 +174,7 @@ def test_backend_pid(conn):
     assert conn.info.backend_pid
     assert conn.info.backend_pid == conn.pgconn.backend_pid
     conn.close()
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         conn.info.backend_pid
@@ -256,7 +256,7 @@ def test_encoding_env_var(conn_cls, dsn, monkeypatch, enc, out, codec):
 def test_set_encoding_unsupported(conn):
     cur = conn.cursor()
     cur.execute("set client_encoding to EUC_TW")
-    with pytest.raises(psycopg.NotSupportedError):
+    with pytest.raises(gaussdb.NotSupportedError):
         cur.execute("select 'x'")
diff --git a/tests/test_conninfo.py b/tests/test_conninfo.py
index 4222dce30..da4cc8275 100644
--- a/tests/test_conninfo.py
+++ b/tests/test_conninfo.py
@@ -1,8 +1,8 @@
 import pytest
 
-from psycopg import ProgrammingError
-from psycopg.conninfo import _DEFAULT_CONNECT_TIMEOUT, conninfo_to_dict, make_conninfo
-from psycopg.conninfo import timeout_from_conninfo
+from gaussdb import ProgrammingError
+from gaussdb.conninfo import _DEFAULT_CONNECT_TIMEOUT, conninfo_to_dict, make_conninfo
+from gaussdb.conninfo import timeout_from_conninfo
 
 snowman = "\u2603"
diff --git a/tests/test_conninfo_attempts.py b/tests/test_conninfo_attempts.py
index 25bc0dfd7..cd96a9707 100644
--- a/tests/test_conninfo_attempts.py
+++ b/tests/test_conninfo_attempts.py
@@ -3,8 +3,8 @@
 # DO NOT CHANGE! Change the original file instead.
 import pytest
 
-import psycopg
-from psycopg.conninfo import conninfo_attempts, conninfo_to_dict
+import gaussdb
+from gaussdb.conninfo import conninfo_attempts, conninfo_to_dict
 
 pytestmark = pytest.mark.anyio
@@ -162,7 +162,7 @@ def test_conninfo_attempts(conninfo, want, env, fake_resolve):
 def test_conninfo_attempts_bad(setpgenv, conninfo, env, fake_resolve):
     setpgenv(env)
     params = conninfo_to_dict(conninfo)
-    with pytest.raises(psycopg.Error):
+    with pytest.raises(gaussdb.Error):
         conninfo_attempts(params)
diff --git a/tests/test_conninfo_attempts_async.py b/tests/test_conninfo_attempts_async.py
index dcf1a51f7..6b809c349 100644
--- a/tests/test_conninfo_attempts_async.py
+++ b/tests/test_conninfo_attempts_async.py
@@ -1,7 +1,7 @@
 import pytest
 
-import psycopg
-from psycopg.conninfo import conninfo_attempts_async, conninfo_to_dict
+import gaussdb
+from gaussdb.conninfo import conninfo_attempts_async, conninfo_to_dict
 
 pytestmark = pytest.mark.anyio
@@ -169,7 +169,7 @@ async def test_conninfo_attempts(conninfo, want, env, fake_resolve):
 async def test_conninfo_attempts_bad(setpgenv, conninfo, env, fake_resolve):
     setpgenv(env)
     params = conninfo_to_dict(conninfo)
-    with pytest.raises(psycopg.Error):
+    with pytest.raises(gaussdb.Error):
         await conninfo_attempts_async(params)
diff --git a/tests/test_copy.py b/tests/test_copy.py
index 82ef9adec..8beb2c8a9 100644
--- a/tests/test_copy.py
+++ b/tests/test_copy.py
@@ -9,14 +9,14 @@
 
 import pytest
 
-import psycopg
-from psycopg import errors as e
-from psycopg import pq, sql
-from psycopg.copy import Copy, LibpqWriter, QueuedLibpqWriter
-from psycopg.adapt import PyFormat
-from psycopg.types import TypeInfo
-from psycopg.types.hstore import register_hstore
-from psycopg.types.numeric import Int4
+import gaussdb
+from gaussdb import errors as e
+from gaussdb import pq, sql
+from gaussdb.copy import Copy, LibpqWriter, QueuedLibpqWriter
+from gaussdb.adapt import PyFormat
+from gaussdb.types import TypeInfo
+from gaussdb.types.hstore import register_hstore
+from gaussdb.types.numeric import Int4
 
 from .utils import eur
 from ._test_copy import sample_binary  # noqa: F401
@@ -59,7 +59,7 @@ def test_copy_out_iter(conn, format, row_factory):
     else:
         want = sample_binary_rows
 
-    rf = getattr(psycopg.rows, row_factory)
+    rf = getattr(gaussdb.rows, row_factory)
     cur = conn.cursor(row_factory=rf)
     with cur.copy(f"copy ({sample_values}) to stdout (format {format.name})") as copy:
         result = [bytes(item) for item in copy]
@@ -71,7 +71,7 @@ def test_copy_out_iter(conn, format, row_factory):
 @pytest.mark.parametrize("format", pq.Format)
 @pytest.mark.parametrize("row_factory", ["tuple_row", "dict_row", "namedtuple_row"])
 def test_copy_out_no_result(conn, format, row_factory):
-    rf = getattr(psycopg.rows, row_factory)
+    rf = getattr(gaussdb.rows, row_factory)
     cur = conn.cursor(row_factory=rf)
     with cur.copy(f"copy ({sample_values}) to stdout (format {format.name})"):
         with pytest.raises(e.ProgrammingError):
@@ -309,9 +309,9 @@ def test_copy_big_size_block(conn, pytype):
 @pytest.mark.parametrize("format", pq.Format)
 def test_subclass_adapter(conn, format):
     if format == pq.Format.TEXT:
-        from psycopg.types.string import StrDumper as BaseDumper
+        from gaussdb.types.string import StrDumper as BaseDumper
     else:
-        from psycopg.types.string import StrBinaryDumper
+        from gaussdb.types.string import StrBinaryDumper
 
         BaseDumper = StrBinaryDumper  # type: ignore
@@ -684,7 +684,7 @@ def copy_to_broken(pgconn, buffer, flush=True):
             raise ZeroDivisionError
         yield
 
-    monkeypatch.setattr(psycopg._copy, "copy_to", copy_to_broken)
+    monkeypatch.setattr(gaussdb._copy, "copy_to", copy_to_broken)
     cur = conn.cursor()
     cur.execute("create temp table wat (a text, b text)")
     with pytest.raises(ZeroDivisionError):
@@ -761,7 +761,7 @@ def work():
                         break
                 elif method == "rows":
                     list(copy.rows())
-        except (psycopg.OperationalError, psycopg.DataError) as e:
+        except (gaussdb.OperationalError, gaussdb.DataError) as e:
            if "no COPY in progress" in str(
                e
            ) or "binary copy doesn't start" in str(e):
diff --git a/tests/test_copy_async.py b/tests/test_copy_async.py
index 1c901ffe9..7d33ee578 100644
--- a/tests/test_copy_async.py
+++ b/tests/test_copy_async.py
@@ -6,14 +6,14 @@
 
 import pytest
 
-import psycopg
-from psycopg import errors as e
-from psycopg import pq, sql
-from psycopg.copy import AsyncCopy, AsyncLibpqWriter, AsyncQueuedLibpqWriter
-from psycopg.adapt import PyFormat
-from psycopg.types import TypeInfo
-from psycopg.types.hstore import register_hstore
-from psycopg.types.numeric import Int4
+import gaussdb
+from gaussdb import errors as e
+from gaussdb import pq, sql
+from gaussdb.copy import AsyncCopy, AsyncLibpqWriter, AsyncQueuedLibpqWriter
+from gaussdb.adapt import PyFormat
+from gaussdb.types import TypeInfo
+from gaussdb.types.hstore import register_hstore
+from gaussdb.types.numeric import Int4
 
 from .utils import eur
 from .acompat import alist
@@ -59,7 +59,7 @@ async def test_copy_out_iter(aconn, format, row_factory):
     else:
         want = sample_binary_rows
 
-    rf = getattr(psycopg.rows, row_factory)
+    rf = getattr(gaussdb.rows, row_factory)
     cur = aconn.cursor(row_factory=rf)
     async with cur.copy(
         f"copy ({sample_values}) to stdout (format {format.name})"
@@ -74,7 +74,7 @@
 @pytest.mark.parametrize("format", pq.Format)
 @pytest.mark.parametrize("row_factory", ["tuple_row", "dict_row", "namedtuple_row"])
 async def test_copy_out_no_result(aconn, format, row_factory):
-    rf = getattr(psycopg.rows, row_factory)
+    rf = getattr(gaussdb.rows, row_factory)
     cur = aconn.cursor(row_factory=rf)
     async with cur.copy(f"copy ({sample_values}) to stdout (format {format.name})"):
         with pytest.raises(e.ProgrammingError):
@@ -320,9 +320,9 @@ async def test_copy_big_size_block(aconn, pytype):
 @pytest.mark.parametrize("format", pq.Format)
 async def test_subclass_adapter(aconn, format):
     if format == pq.Format.TEXT:
-        from psycopg.types.string import StrDumper as BaseDumper
+        from gaussdb.types.string import StrDumper as BaseDumper
     else:
-        from psycopg.types.string import StrBinaryDumper
+        from gaussdb.types.string import StrBinaryDumper
 
         BaseDumper = StrBinaryDumper  # type: ignore
@@ -699,7 +699,7 @@ def copy_to_broken(pgconn, buffer, flush=True):
             raise ZeroDivisionError
         yield
 
-    monkeypatch.setattr(psycopg._copy_async, "copy_to", copy_to_broken)
+    monkeypatch.setattr(gaussdb._copy_async, "copy_to", copy_to_broken)
     cur = aconn.cursor()
     await cur.execute("create temp table wat (a text, b text)")
     with pytest.raises(ZeroDivisionError):
@@ -778,7 +778,7 @@ async def work():
                         break
                 elif method == "rows":
                     await alist(copy.rows())
-        except (psycopg.OperationalError, psycopg.DataError) as e:
+        except (gaussdb.OperationalError, gaussdb.DataError) as e:
            if "no COPY in progress" in str(
                e
            ) or "binary copy doesn't start" in str(e):
diff --git a/tests/test_cursor.py b/tests/test_cursor.py
index e31791e96..b7b6531ce 100644
--- a/tests/test_cursor.py
+++ b/tests/test_cursor.py
@@ -2,31 +2,31 @@
 # from the original file 'test_cursor_async.py'
 # DO NOT CHANGE! Change the original file instead.
 """
-Tests for psycopg.Cursor that are not supposed to pass for subclasses.
+Tests for gaussdb.Cursor that are not supposed to pass for subclasses.
 """
 
 import pytest
 
-import psycopg
-from psycopg import errors as e
-from psycopg import pq, rows
-from psycopg.adapt import PyFormat
+import gaussdb
+from gaussdb import errors as e
+from gaussdb import pq, rows
+from gaussdb.adapt import PyFormat
 
 
 def test_default_cursor(conn):
     cur = conn.cursor()
-    assert type(cur) is psycopg.Cursor
+    assert type(cur) is gaussdb.Cursor
 
 
 def test_from_cursor_factory(conn_cls, dsn):
-    with conn_cls.connect(dsn, cursor_factory=psycopg.ClientCursor) as conn:
+    with conn_cls.connect(dsn, cursor_factory=gaussdb.ClientCursor) as conn:
         cur = conn.cursor()
-        assert type(cur) is psycopg.ClientCursor
+        assert type(cur) is gaussdb.ClientCursor
 
 
 def test_str(conn):
     cur = conn.cursor()
-    assert "psycopg.%s" % psycopg.Cursor.__name__ in str(cur)
+    assert "gaussdb.%s" % gaussdb.Cursor.__name__ in str(cur)
 
 
 def test_execute_many_results_param(conn):
@@ -49,7 +49,7 @@ def test_query_params_execute(conn):
     assert cur._query.query == b"select 1"
     assert not cur._query.params
 
-    with pytest.raises(psycopg.DataError):
+    with pytest.raises(gaussdb.DataError):
         cur.execute("select %t::int", ["wat"])
 
     assert cur._query.query == b"select $1::int"
diff --git a/tests/test_cursor_async.py b/tests/test_cursor_async.py
index f54b31aa7..7ec5a0ba1 100644
--- a/tests/test_cursor_async.py
+++ b/tests/test_cursor_async.py
@@ -1,31 +1,31 @@
 """
-Tests for psycopg.Cursor that are not supposed to pass for subclasses.
+Tests for gaussdb.Cursor that are not supposed to pass for subclasses.
""" import pytest -import psycopg -from psycopg import errors as e -from psycopg import pq, rows -from psycopg.adapt import PyFormat +import gaussdb +from gaussdb import errors as e +from gaussdb import pq, rows +from gaussdb.adapt import PyFormat async def test_default_cursor(aconn): cur = aconn.cursor() - assert type(cur) is psycopg.AsyncCursor + assert type(cur) is gaussdb.AsyncCursor async def test_from_cursor_factory(aconn_cls, dsn): async with await aconn_cls.connect( - dsn, cursor_factory=psycopg.AsyncClientCursor + dsn, cursor_factory=gaussdb.AsyncClientCursor ) as aconn: cur = aconn.cursor() - assert type(cur) is psycopg.AsyncClientCursor + assert type(cur) is gaussdb.AsyncClientCursor async def test_str(aconn): cur = aconn.cursor() - assert "psycopg.%s" % psycopg.AsyncCursor.__name__ in str(cur) + assert "gaussdb.%s" % gaussdb.AsyncCursor.__name__ in str(cur) async def test_execute_many_results_param(aconn): @@ -48,7 +48,7 @@ async def test_query_params_execute(aconn): assert cur._query.query == b"select 1" assert not cur._query.params - with pytest.raises(psycopg.DataError): + with pytest.raises(gaussdb.DataError): await cur.execute("select %t::int", ["wat"]) assert cur._query.query == b"select $1::int" diff --git a/tests/test_cursor_client.py b/tests/test_cursor_client.py index 70a4d55db..c5d0067ae 100644 --- a/tests/test_cursor_client.py +++ b/tests/test_cursor_client.py @@ -5,32 +5,32 @@ import pytest -import psycopg -from psycopg import rows +import gaussdb +from gaussdb import rows from .fix_crdb import crdb_encoding @pytest.fixture def conn(conn, anyio_backend): - conn.cursor_factory = psycopg.ClientCursor + conn.cursor_factory = gaussdb.ClientCursor return conn def test_default_cursor(conn): cur = conn.cursor() - assert type(cur) is psycopg.ClientCursor + assert type(cur) is gaussdb.ClientCursor def test_str(conn): cur = conn.cursor() - assert "psycopg.%s" % psycopg.ClientCursor.__name__ in str(cur) + assert "gaussdb.%s" % gaussdb.ClientCursor.__name__ in str(cur) def test_from_cursor_factory(conn_cls, dsn): - with conn_cls.connect(dsn, cursor_factory=psycopg.ClientCursor) as conn: + with conn_cls.connect(dsn, cursor_factory=gaussdb.ClientCursor) as conn: cur = conn.cursor() - assert type(cur) is psycopg.ClientCursor + assert type(cur) is gaussdb.ClientCursor def test_execute_many_results_param(conn): @@ -62,7 +62,7 @@ def test_query_params_execute(conn): assert cur._query.query == b"select 1" assert not cur._query.params - with pytest.raises(psycopg.DataError): + with pytest.raises(gaussdb.DataError): cur.execute("select %t::int", ["wat"]) assert cur._query.query == b"select 'wat'::int" @@ -87,7 +87,7 @@ def test_leak(conn_cls, dsn, faker, fetch, row_factory, gc): def work(): with conn_cls.connect(dsn) as conn, conn.transaction(force_rollback=True): - with psycopg.ClientCursor(conn, row_factory=row_factory) as cur: + with gaussdb.ClientCursor(conn, row_factory=row_factory) as cur: cur.execute(faker.drop_stmt) cur.execute(faker.create_stmt) with faker.find_insert_problem(conn): diff --git a/tests/test_cursor_client_async.py b/tests/test_cursor_client_async.py index 753c95434..198250905 100644 --- a/tests/test_cursor_client_async.py +++ b/tests/test_cursor_client_async.py @@ -2,34 +2,34 @@ import pytest -import psycopg -from psycopg import rows +import gaussdb +from gaussdb import rows from .fix_crdb import crdb_encoding @pytest.fixture async def aconn(aconn, anyio_backend): - aconn.cursor_factory = psycopg.AsyncClientCursor + aconn.cursor_factory = 
gaussdb.AsyncClientCursor return aconn async def test_default_cursor(aconn): cur = aconn.cursor() - assert type(cur) is psycopg.AsyncClientCursor + assert type(cur) is gaussdb.AsyncClientCursor async def test_str(aconn): cur = aconn.cursor() - assert "psycopg.%s" % psycopg.AsyncClientCursor.__name__ in str(cur) + assert "gaussdb.%s" % gaussdb.AsyncClientCursor.__name__ in str(cur) async def test_from_cursor_factory(aconn_cls, dsn): async with await aconn_cls.connect( - dsn, cursor_factory=psycopg.AsyncClientCursor + dsn, cursor_factory=gaussdb.AsyncClientCursor ) as aconn: cur = aconn.cursor() - assert type(cur) is psycopg.AsyncClientCursor + assert type(cur) is gaussdb.AsyncClientCursor async def test_execute_many_results_param(aconn): @@ -61,7 +61,7 @@ async def test_query_params_execute(aconn): assert cur._query.query == b"select 1" assert not cur._query.params - with pytest.raises(psycopg.DataError): + with pytest.raises(gaussdb.DataError): await cur.execute("select %t::int", ["wat"]) assert cur._query.query == b"select 'wat'::int" @@ -88,7 +88,7 @@ async def work(): async with await aconn_cls.connect(dsn) as conn, conn.transaction( force_rollback=True ): - async with psycopg.AsyncClientCursor(conn, row_factory=row_factory) as cur: + async with gaussdb.AsyncClientCursor(conn, row_factory=row_factory) as cur: await cur.execute(faker.drop_stmt) await cur.execute(faker.create_stmt) async with faker.find_insert_problem_async(conn): diff --git a/tests/test_cursor_common.py b/tests/test_cursor_common.py index 4a76f4ffe..dd361f8e2 100644 --- a/tests/test_cursor_common.py +++ b/tests/test_cursor_common.py @@ -2,7 +2,7 @@ # from the original file 'test_cursor_common_async.py' # DO NOT CHANGE! Change the original file instead. """ -Tests common to psycopg.Cursor and its subclasses. +Tests common to gaussdb.Cursor and its subclasses. """ import weakref @@ -12,10 +12,10 @@ import pytest from packaging.version import parse as ver -import psycopg -from psycopg import pq, rows, sql -from psycopg.adapt import PyFormat -from psycopg.types import TypeInfo +import gaussdb +from gaussdb import pq, rows, sql +from gaussdb.adapt import PyFormat +from gaussdb.types import TypeInfo from . import _test_cursor from .utils import raiseif @@ -26,12 +26,12 @@ execmany = _test_cursor.execmany # avoid F811 underneath _execmany = _test_cursor._execmany # needed by the execmany fixture -cursor_classes = [psycopg.Cursor, psycopg.ClientCursor] -# Allow to import (not necessarily to run) the module with psycopg 3.1. -# Needed to test psycopg_pool 3.2 tests with psycopg 3.1 imported, i.e. to run +cursor_classes = [gaussdb.Cursor, gaussdb.ClientCursor] +# Allow to import (not necessarily to run) the module with gaussdb 3.1. +# Needed to test gaussdb_pool 3.2 tests with gaussdb 3.1 imported, i.e. to run # `pytest -m pool`. (which might happen when releasing pool packages). 
-if ver(psycopg.__version__) >= ver("3.2.0.dev0"):
-    cursor_classes.append(psycopg.RawCursor)
+if ver(gaussdb.__version__) >= ver("3.2.0.dev0"):
+    cursor_classes.append(gaussdb.RawCursor)
 
 
 @pytest.fixture(params=cursor_classes)
@@ -63,7 +63,7 @@ def test_close(conn):
     cur.close()
     assert cur.closed
 
-    with pytest.raises(psycopg.InterfaceError):
+    with pytest.raises(gaussdb.InterfaceError):
         cur.execute("select 'foo'")
 
     cur.close()
@@ -82,7 +82,7 @@ def test_cursor_close_fetchone(conn):
     cur.close()
     assert cur.closed
 
-    with pytest.raises(psycopg.InterfaceError):
+    with pytest.raises(gaussdb.InterfaceError):
         cur.fetchone()
@@ -97,7 +97,7 @@ def test_cursor_close_fetchmany(conn):
     cur.close()
     assert cur.closed
 
-    with pytest.raises(psycopg.InterfaceError):
+    with pytest.raises(gaussdb.InterfaceError):
         cur.fetchmany(2)
@@ -112,7 +112,7 @@ def test_cursor_close_fetchall(conn):
     cur.close()
     assert cur.closed
 
-    with pytest.raises(psycopg.InterfaceError):
+    with pytest.raises(gaussdb.InterfaceError):
         cur.fetchall()
@@ -151,7 +151,7 @@ def test_statusmessage(conn):
     cur.execute("create table statusmessage (dummy_column int)")
     assert cur.statusmessage == "CREATE TABLE"
 
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         cur.execute("wat")
     assert cur.statusmessage is None
@@ -168,11 +168,11 @@ def test_query_parse_cache_size(conn):
     # Warning: testing internal structures. Test might need refactoring with the code.
     cache: Any
-    if cls is psycopg.Cursor:
-        cache = psycopg._queries._query2pg
-    elif cls is psycopg.ClientCursor:
-        cache = psycopg._queries._query2pg_client
-    elif cls is psycopg.RawCursor:
+    if cls is gaussdb.Cursor:
+        cache = gaussdb._queries._query2pg
+    elif cls is gaussdb.ClientCursor:
+        cache = gaussdb._queries._query2pg_client
+    elif cls is gaussdb.RawCursor:
         pytest.skip("RawCursor has no query parse cache")
     else:
         assert False, cls
@@ -191,7 +191,7 @@ def test_query_parse_cache_size(conn):
         (f"select 1 -- {'%s' * 60}", ("x",) * 60, h0 + 2, m0 + 2),
     ]
     for i, (query, params, hits, misses) in enumerate(tests):
-        pq = cur._query_cls(psycopg.adapt.Transformer())
+        pq = cur._query_cls(gaussdb.adapt.Transformer())
         pq.convert(query, params)
         ci = cache.cache_info()
         assert ci.hits == hits, f"at {i}"
@@ -231,7 +231,7 @@ def test_execute_empty_query(conn, query):
     cur = conn.cursor()
     cur.execute(query)
     assert cur.pgresult.status == pq.ExecStatus.EMPTY_QUERY
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         cur.fetchone()
@@ -261,7 +261,7 @@ def test_executemany_type_change(conn):
 def test_execute_copy(conn, query):
     cur = conn.cursor()
     cur.execute("create table testcopy (id int)")
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         cur.execute(query)
@@ -278,7 +278,7 @@ def test_fetchone(conn):
 def test_binary_cursor_execute(conn):
     with raiseif(
-        conn.cursor_factory is psycopg.ClientCursor, psycopg.NotSupportedError
+        conn.cursor_factory is gaussdb.ClientCursor, gaussdb.NotSupportedError
     ) as ex:
         cur = conn.cursor(binary=True)
         cur.execute(ph(cur, "select %s, %s"), [1, None])
@@ -293,7 +293,7 @@ def test_execute_binary(conn):
     cur = conn.cursor()
     with raiseif(
-        conn.cursor_factory is psycopg.ClientCursor, psycopg.NotSupportedError
+        conn.cursor_factory is gaussdb.ClientCursor, gaussdb.NotSupportedError
     ) as ex:
         cur.execute(ph(cur, "select %s, %s"), [1, None], binary=True)
     if ex:
@@ -388,7 +388,7 @@ def test_executemany_returning_discard(conn, execmany):
         [(10, "hello"), (20, "world")],
     )
     assert cur.rowcount == 2
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         cur.fetchone()
     assert cur.nextset() is None
@@ -402,7 +402,7 @@ def test_executemany_no_result(conn, execmany):
     )
     assert cur.rowcount == 1
     assert cur.statusmessage.startswith("INSERT")
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         cur.fetchone()
     pgresult = cur.pgresult
     assert cur.nextset()
@@ -434,7 +434,7 @@ def test_executemany_rowcount_no_hit(conn, execmany):
 )
 def test_executemany_badquery(conn, query):
     cur = conn.cursor()
-    with pytest.raises(psycopg.DatabaseError):
+    with pytest.raises(gaussdb.DatabaseError):
         cur.executemany(ph(cur, query), [(10, "hello"), (20, "world")])
@@ -446,7 +446,7 @@ def test_executemany_null_first(conn, fmt_in):
         ph(cur, f"insert into testmany values (%{fmt_in.value}, %{fmt_in.value})"),
         [[1, None], [3, 4]],
     )
-    with pytest.raises((psycopg.DataError, psycopg.ProgrammingError)):
+    with pytest.raises((gaussdb.DataError, gaussdb.ProgrammingError)):
         cur.executemany(
             ph(cur, f"insert into testmany values (%{fmt_in.value}, %{fmt_in.value})"),
             [[1, ""], [3, 4]],
@@ -550,7 +550,7 @@ def test_row_factory(conn):
     cur = conn.cursor(row_factory=my_row_factory)
 
     cur.execute("reset search_path")
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         cur.fetchone()
 
     cur.execute("select 'foo' as bar")
@@ -600,7 +600,7 @@ def make_row(seq):
 
 def test_scroll(conn):
     cur = conn.cursor()
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         cur.scroll(0)
 
     cur.execute("select generate_series(0,9)")
@@ -703,7 +703,7 @@ def test_stream_chunked_invalid_size(conn):
 @pytest.mark.libpq("< 17")
 def test_stream_chunked_not_supported(conn):
     cur = conn.cursor()
-    with pytest.raises(psycopg.NotSupportedError):
+    with pytest.raises(gaussdb.NotSupportedError):
         next(cur.stream("select generate_series(1, 4)", size=2))
@@ -733,14 +733,14 @@ def test_stream_chunked_row_factory(conn):
 )
 def test_stream_badquery(conn, query):
     cur = conn.cursor()
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         for rec in cur.stream(query):
             pass
 
 
 def test_stream_error_tx(conn):
     cur = conn.cursor()
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         for rec in cur.stream("wat"):
             pass
     assert conn.info.transaction_status == pq.TransactionStatus.INERROR
@@ -749,7 +749,7 @@ def test_stream_error_notx(conn):
     conn.set_autocommit(True)
     cur = conn.cursor()
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         for rec in cur.stream("wat"):
             pass
     assert conn.info.transaction_status == pq.TransactionStatus.IDLE
@@ -782,7 +782,7 @@ def test_stream_error_python_consumed(conn):
 def test_stream_close(conn, autocommit):
     conn.set_autocommit(autocommit)
     cur = conn.cursor()
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         for rec in cur.stream("select generate_series(1, 3)"):
             if rec[0] == 1:
                 conn.close()
@@ -794,7 +794,7 @@ def test_stream_binary_cursor(conn):
     with raiseif(
-        conn.cursor_factory is psycopg.ClientCursor, psycopg.NotSupportedError
+        conn.cursor_factory is gaussdb.ClientCursor, gaussdb.NotSupportedError
     ):
         cur = conn.cursor(binary=True)
         recs = []
@@ -810,7 +810,7 @@ def test_stream_execute_binary(conn):
     cur = conn.cursor()
     recs = []
     with raiseif(
-        conn.cursor_factory is psycopg.ClientCursor, psycopg.NotSupportedError
+        conn.cursor_factory is gaussdb.ClientCursor, gaussdb.NotSupportedError
     ):
         for rec in cur.stream(
             "select x::int4 from generate_series(1, 2) x", binary=True
@@ -850,7 +850,7 @@ def test_str(conn):
 
 @pytest.mark.pipeline
 def test_message_0x33(conn):
-    # https://github.com/psycopg/psycopg/issues/314
+    # https://github.com/gaussdb/gaussdb/issues/314
     notices = []
     conn.add_notice_handler(lambda diag: notices.append(diag.message_primary))
diff --git a/tests/test_cursor_common_async.py b/tests/test_cursor_common_async.py
index 8aaa89c38..c94e2cac9 100644
--- a/tests/test_cursor_common_async.py
+++ b/tests/test_cursor_common_async.py
@@ -1,5 +1,5 @@
 """
-Tests common to psycopg.AsyncCursor and its subclasses.
+Tests common to gaussdb.AsyncCursor and its subclasses.
 """
 
 import weakref
@@ -9,10 +9,10 @@
 import pytest
 from packaging.version import parse as ver
 
-import psycopg
-from psycopg import pq, rows, sql
-from psycopg.adapt import PyFormat
-from psycopg.types import TypeInfo
+import gaussdb
+from gaussdb import pq, rows, sql
+from gaussdb.adapt import PyFormat
+from gaussdb.types import TypeInfo
 
 from . import _test_cursor
 from .utils import raiseif
@@ -24,12 +24,12 @@
 _execmany = _test_cursor._execmany  # needed by the execmany fixture
 
-cursor_classes = [psycopg.AsyncCursor, psycopg.AsyncClientCursor]
-# Allow to import (not necessarily to run) the module with psycopg 3.1.
-# Needed to test psycopg_pool 3.2 tests with psycopg 3.1 imported, i.e. to run
+cursor_classes = [gaussdb.AsyncCursor, gaussdb.AsyncClientCursor]
+# Allow to import (not necessarily to run) the module with gaussdb 3.1.
+# Needed to test gaussdb_pool 3.2 tests with gaussdb 3.1 imported, i.e. to run
 # `pytest -m pool`. (which might happen when releasing pool packages).
-if ver(psycopg.__version__) >= ver("3.2.0.dev0"):
-    cursor_classes.append(psycopg.AsyncRawCursor)
+if ver(gaussdb.__version__) >= ver("3.2.0.dev0"):
+    cursor_classes.append(gaussdb.AsyncRawCursor)
 
 
 @pytest.fixture(params=cursor_classes)
@@ -61,7 +61,7 @@ async def test_close(aconn):
     await cur.close()
     assert cur.closed
 
-    with pytest.raises(psycopg.InterfaceError):
+    with pytest.raises(gaussdb.InterfaceError):
         await cur.execute("select 'foo'")
 
     await cur.close()
@@ -80,7 +80,7 @@ async def test_cursor_close_fetchone(aconn):
     await cur.close()
     assert cur.closed
 
-    with pytest.raises(psycopg.InterfaceError):
+    with pytest.raises(gaussdb.InterfaceError):
         await cur.fetchone()
@@ -95,7 +95,7 @@ async def test_cursor_close_fetchmany(aconn):
     await cur.close()
     assert cur.closed
 
-    with pytest.raises(psycopg.InterfaceError):
+    with pytest.raises(gaussdb.InterfaceError):
         await cur.fetchmany(2)
@@ -110,7 +110,7 @@ async def test_cursor_close_fetchall(aconn):
     await cur.close()
     assert cur.closed
 
-    with pytest.raises(psycopg.InterfaceError):
+    with pytest.raises(gaussdb.InterfaceError):
         await cur.fetchall()
@@ -149,7 +149,7 @@ async def test_statusmessage(aconn):
     await cur.execute("create table statusmessage (dummy_column int)")
     assert cur.statusmessage == "CREATE TABLE"
 
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         await cur.execute("wat")
     assert cur.statusmessage is None
@@ -166,11 +166,11 @@ async def test_query_parse_cache_size(aconn):
     # Warning: testing internal structures. Test might need refactoring with the code.
     cache: Any
-    if cls is psycopg.AsyncCursor:
-        cache = psycopg._queries._query2pg
-    elif cls is psycopg.AsyncClientCursor:
-        cache = psycopg._queries._query2pg_client
-    elif cls is psycopg.AsyncRawCursor:
+    if cls is gaussdb.AsyncCursor:
+        cache = gaussdb._queries._query2pg
+    elif cls is gaussdb.AsyncClientCursor:
+        cache = gaussdb._queries._query2pg_client
+    elif cls is gaussdb.AsyncRawCursor:
         pytest.skip("RawCursor has no query parse cache")
     else:
         assert False, cls
@@ -189,7 +189,7 @@ async def test_query_parse_cache_size(aconn):
         (f"select 1 -- {'%s' * 60}", ("x",) * 60, h0 + 2, m0 + 2),
     ]
     for i, (query, params, hits, misses) in enumerate(tests):
-        pq = cur._query_cls(psycopg.adapt.Transformer())
+        pq = cur._query_cls(gaussdb.adapt.Transformer())
         pq.convert(query, params)
         ci = cache.cache_info()
         assert ci.hits == hits, f"at {i}"
@@ -231,7 +231,7 @@ async def test_execute_empty_query(aconn, query):
     cur = aconn.cursor()
     await cur.execute(query)
     assert cur.pgresult.status == pq.ExecStatus.EMPTY_QUERY
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         await cur.fetchone()
@@ -261,7 +261,7 @@ async def test_executemany_type_change(aconn):
 async def test_execute_copy(aconn, query):
     cur = aconn.cursor()
     await cur.execute("create table testcopy (id int)")
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         await cur.execute(query)
@@ -278,7 +278,7 @@ async def test_fetchone(aconn):
 async def test_binary_cursor_execute(aconn):
     with raiseif(
-        aconn.cursor_factory is psycopg.AsyncClientCursor, psycopg.NotSupportedError
+        aconn.cursor_factory is gaussdb.AsyncClientCursor, gaussdb.NotSupportedError
     ) as ex:
         cur = aconn.cursor(binary=True)
         await cur.execute(ph(cur, "select %s, %s"), [1, None])
@@ -293,7 +293,7 @@ async def test_execute_binary(aconn):
     cur = aconn.cursor()
     with raiseif(
-        aconn.cursor_factory is psycopg.AsyncClientCursor, psycopg.NotSupportedError
+        aconn.cursor_factory is gaussdb.AsyncClientCursor, gaussdb.NotSupportedError
     ) as ex:
         await cur.execute(ph(cur, "select %s, %s"), [1, None], binary=True)
     if ex:
@@ -390,7 +390,7 @@ async def test_executemany_returning_discard(aconn, execmany):
         [(10, "hello"), (20, "world")],
     )
     assert cur.rowcount == 2
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         await cur.fetchone()
     assert cur.nextset() is None
@@ -404,7 +404,7 @@ async def test_executemany_no_result(aconn, execmany):
     )
     assert cur.rowcount == 1
     assert cur.statusmessage.startswith("INSERT")
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         await cur.fetchone()
     pgresult = cur.pgresult
     assert cur.nextset()
@@ -436,7 +436,7 @@ async def test_executemany_rowcount_no_hit(aconn, execmany):
 )
 async def test_executemany_badquery(aconn, query):
     cur = aconn.cursor()
-    with pytest.raises(psycopg.DatabaseError):
+    with pytest.raises(gaussdb.DatabaseError):
         await cur.executemany(ph(cur, query), [(10, "hello"), (20, "world")])
@@ -448,7 +448,7 @@ async def test_executemany_null_first(aconn, fmt_in):
         ph(cur, f"insert into testmany values (%{fmt_in.value}, %{fmt_in.value})"),
         [[1, None], [3, 4]],
     )
-    with pytest.raises((psycopg.DataError, psycopg.ProgrammingError)):
+    with pytest.raises((gaussdb.DataError, gaussdb.ProgrammingError)):
         await cur.executemany(
             ph(cur, f"insert into testmany values (%{fmt_in.value}, %{fmt_in.value})"),
             [[1, ""], [3, 4]],
@@ -554,7 +554,7 @@ async def test_row_factory(aconn):
     cur = aconn.cursor(row_factory=my_row_factory)
 
     await cur.execute("reset search_path")
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         await cur.fetchone()
 
     await cur.execute("select 'foo' as bar")
@@ -602,7 +602,7 @@ def make_row(seq):
 
 async def test_scroll(aconn):
     cur = aconn.cursor()
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         await cur.scroll(0)
 
     await cur.execute("select generate_series(0,9)")
@@ -705,7 +705,7 @@ async def test_stream_chunked_invalid_size(aconn):
 @pytest.mark.libpq("< 17")
 async def test_stream_chunked_not_supported(aconn):
     cur = aconn.cursor()
-    with pytest.raises(psycopg.NotSupportedError):
+    with pytest.raises(gaussdb.NotSupportedError):
         await anext(cur.stream("select generate_series(1, 4)", size=2))
@@ -735,14 +735,14 @@ async def test_stream_chunked_row_factory(aconn):
 )
 async def test_stream_badquery(aconn, query):
     cur = aconn.cursor()
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         async for rec in cur.stream(query):
             pass
 
 
 async def test_stream_error_tx(aconn):
     cur = aconn.cursor()
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         async for rec in cur.stream("wat"):
             pass
     assert aconn.info.transaction_status == pq.TransactionStatus.INERROR
@@ -751,7 +751,7 @@ async def test_stream_error_notx(aconn):
     await aconn.set_autocommit(True)
     cur = aconn.cursor()
-    with pytest.raises(psycopg.ProgrammingError):
+    with pytest.raises(gaussdb.ProgrammingError):
         async for rec in cur.stream("wat"):
             pass
     assert aconn.info.transaction_status == pq.TransactionStatus.IDLE
@@ -784,7 +784,7 @@ async def test_stream_error_python_consumed(aconn):
 async def test_stream_close(aconn, autocommit):
     await aconn.set_autocommit(autocommit)
     cur = aconn.cursor()
-    with pytest.raises(psycopg.OperationalError):
+    with pytest.raises(gaussdb.OperationalError):
         async for rec in cur.stream("select generate_series(1, 3)"):
             if rec[0] == 1:
                 await aconn.close()
@@ -796,7 +796,7 @@ async def test_stream_binary_cursor(aconn):
     with raiseif(
-        aconn.cursor_factory is psycopg.AsyncClientCursor, psycopg.NotSupportedError
+        aconn.cursor_factory is gaussdb.AsyncClientCursor, gaussdb.NotSupportedError
     ):
         cur = aconn.cursor(binary=True)
         recs = []
@@ -812,7 +812,7 @@ async def test_stream_execute_binary(aconn):
     cur = aconn.cursor()
     recs = []
     with raiseif(
-        aconn.cursor_factory is psycopg.AsyncClientCursor, psycopg.NotSupportedError
+        aconn.cursor_factory is gaussdb.AsyncClientCursor, gaussdb.NotSupportedError
     ):
         async for rec in cur.stream(
             "select x::int4 from generate_series(1, 2) x", binary=True
@@ -852,7 +852,7 @@ async def test_str(aconn):
 
 @pytest.mark.pipeline
 async def test_message_0x33(aconn):
-    # https://github.com/psycopg/psycopg/issues/314
+    # https://github.com/gaussdb/gaussdb/issues/314
     notices = []
     aconn.add_notice_handler(lambda diag: notices.append(diag.message_primary))
diff --git a/tests/test_cursor_raw.py b/tests/test_cursor_raw.py
index 64ef05125..015b62892 100644
--- a/tests/test_cursor_raw.py
+++ b/tests/test_cursor_raw.py
@@ -3,28 +3,28 @@
 # DO NOT CHANGE! Change the original file instead.
 import pytest
 
-import psycopg
-from psycopg import errors as e
-from psycopg import pq, rows
-from psycopg.adapt import PyFormat
+import gaussdb
+from gaussdb import errors as e
+from gaussdb import pq, rows
+from gaussdb.adapt import PyFormat
 
 from ._test_cursor import ph
 
 
 @pytest.fixture
 def conn(conn, anyio_backend):
-    conn.cursor_factory = psycopg.RawCursor
+    conn.cursor_factory = gaussdb.RawCursor
     return conn
 
 
 def test_default_cursor(conn):
     cur = conn.cursor()
-    assert type(cur) is psycopg.RawCursor
+    assert type(cur) is gaussdb.RawCursor
 
 
 def test_str(conn):
     cur = conn.cursor()
-    assert "psycopg.%s" % psycopg.RawCursor.__name__ in str(cur)
+    assert "gaussdb.%s" % gaussdb.RawCursor.__name__ in str(cur)
 
 
 def test_sequence_only(conn):
@@ -56,7 +56,7 @@ def test_query_params_execute(conn):
     assert cur._query.query == b"select 1"
     assert not cur._query.params
 
-    with pytest.raises(psycopg.DataError):
+    with pytest.raises(gaussdb.DataError):
         cur.execute("select $1::int", ["wat"])
 
     assert cur._query.query == b"select $1::int"
diff --git a/tests/test_cursor_raw_async.py b/tests/test_cursor_raw_async.py
index 42a241e4a..a562d4cc2 100644
--- a/tests/test_cursor_raw_async.py
+++ b/tests/test_cursor_raw_async.py
@@ -1,27 +1,27 @@
 import pytest
 
-import psycopg
-from psycopg import errors as e
-from psycopg import pq, rows
-from psycopg.adapt import PyFormat
+import gaussdb
+from gaussdb import errors as e
+from gaussdb import pq, rows
+from gaussdb.adapt import PyFormat
 
 from ._test_cursor import ph
 
 
 @pytest.fixture
 async def aconn(aconn, anyio_backend):
-    aconn.cursor_factory = psycopg.AsyncRawCursor
+    aconn.cursor_factory = gaussdb.AsyncRawCursor
     return aconn
 
 
 async def test_default_cursor(aconn):
     cur = aconn.cursor()
-    assert type(cur) is psycopg.AsyncRawCursor
+    assert type(cur) is gaussdb.AsyncRawCursor
 
 
 async def test_str(aconn):
     cur = aconn.cursor()
-    assert "psycopg.%s" % psycopg.AsyncRawCursor.__name__ in str(cur)
+    assert "gaussdb.%s" % gaussdb.AsyncRawCursor.__name__ in str(cur)
 
 
 async def test_sequence_only(aconn):
@@ -53,7 +53,7 @@ async def test_query_params_execute(aconn):
     assert cur._query.query == b"select 1"
     assert not cur._query.params
 
-    with pytest.raises(psycopg.DataError):
+    with pytest.raises(gaussdb.DataError):
         await cur.execute("select $1::int", ["wat"])
 
     assert cur._query.query == b"select $1::int"
diff --git a/tests/test_cursor_server.py b/tests/test_cursor_server.py
index 693fa5c61..ff856ab15 100644
--- a/tests/test_cursor_server.py
+++ b/tests/test_cursor_server.py
@@ -4,18 +4,18 @@
 
 import pytest
 from packaging.version import parse as ver
 
-import psycopg
-from psycopg import errors as e
-from psycopg import pq, rows
+import gaussdb
+from gaussdb import errors as e
+from gaussdb import pq, rows
 
 from ._test_cursor import ph
 
 pytestmark = pytest.mark.crdb_skip("server-side cursor")
 
-cursor_classes = [psycopg.ServerCursor]
-# Allow to import (not necessarily to run) the module with psycopg 3.1.
-if ver(psycopg.__version__) >= ver("3.2.0.dev0"):
-    cursor_classes.append(psycopg.RawServerCursor)
+cursor_classes = [gaussdb.ServerCursor]
+# Allow to import (not necessarily to run) the module with gaussdb 3.1.
+if ver(gaussdb.__version__) >= ver("3.2.0.dev0"):
+    cursor_classes.append(gaussdb.RawServerCursor)
 
 
 @pytest.fixture(params=cursor_classes)
@@ -25,28 +25,28 @@ def conn(conn, request, anyio_backend):
 
 def test_init_row_factory(conn):
-    with psycopg.ServerCursor(conn, "foo") as cur:
+    with gaussdb.ServerCursor(conn, "foo") as cur:
         assert cur.name == "foo"
         assert cur.connection is conn
         assert cur.row_factory is conn.row_factory
 
     conn.row_factory = rows.dict_row
 
-    with psycopg.ServerCursor(conn, "bar") as cur:
+    with gaussdb.ServerCursor(conn, "bar") as cur:
         assert cur.name == "bar"
         assert cur.row_factory is rows.dict_row
 
-    with psycopg.ServerCursor(conn, "baz", row_factory=rows.namedtuple_row) as cur:
+    with gaussdb.ServerCursor(conn, "baz", row_factory=rows.namedtuple_row) as cur:
         assert cur.name == "baz"
         assert cur.row_factory is rows.namedtuple_row
 
 
 def test_init_params(conn):
-    with psycopg.ServerCursor(conn, "foo") as cur:
+    with gaussdb.ServerCursor(conn, "foo") as cur:
         assert cur.scrollable is None
         assert cur.withhold is False
 
-    with psycopg.ServerCursor(conn, "bar", withhold=True, scrollable=False) as cur:
+    with gaussdb.ServerCursor(conn, "bar", withhold=True, scrollable=False) as cur:
         assert cur.scrollable is False
         assert cur.withhold is True
@@ -62,7 +62,7 @@ def test_funny_name(conn):
 
 def test_repr(conn):
     cur = conn.cursor("my-name")
-    assert "psycopg.%s" % conn.server_cursor_factory.__name__ in str(cur)
+    assert "gaussdb.%s" % conn.server_cursor_factory.__name__ in str(cur)
     assert "my-name" in repr(cur)
     cur.close()
diff --git a/tests/test_cursor_server_async.py b/tests/test_cursor_server_async.py
index 7e1cd7c65..8f46fa459 100644
--- a/tests/test_cursor_server_async.py
+++ b/tests/test_cursor_server_async.py
@@ -1,19 +1,19 @@
 import pytest
 from packaging.version import parse as ver
 
-import psycopg
-from psycopg import errors as e
-from psycopg import pq, rows
+import gaussdb
+from gaussdb import errors as e
+from gaussdb import pq, rows
 
 from .acompat import alist
 from ._test_cursor import ph
 
 pytestmark = pytest.mark.crdb_skip("server-side cursor")
 
-cursor_classes = [psycopg.AsyncServerCursor]
-# Allow to import (not necessarily to run) the module with psycopg 3.1.
-if ver(psycopg.__version__) >= ver("3.2.0.dev0"):
-    cursor_classes.append(psycopg.AsyncRawServerCursor)
+cursor_classes = [gaussdb.AsyncServerCursor]
+# Allow to import (not necessarily to run) the module with gaussdb 3.1.
+if ver(gaussdb.__version__) >= ver("3.2.0.dev0"): + cursor_classes.append(gaussdb.AsyncRawServerCursor) @pytest.fixture(params=cursor_classes) @@ -23,18 +23,18 @@ async def aconn(aconn, request, anyio_backend): async def test_init_row_factory(aconn): - async with psycopg.AsyncServerCursor(aconn, "foo") as cur: + async with gaussdb.AsyncServerCursor(aconn, "foo") as cur: assert cur.name == "foo" assert cur.connection is aconn assert cur.row_factory is aconn.row_factory aconn.row_factory = rows.dict_row - async with psycopg.AsyncServerCursor(aconn, "bar") as cur: + async with gaussdb.AsyncServerCursor(aconn, "bar") as cur: assert cur.name == "bar" assert cur.row_factory is rows.dict_row - async with psycopg.AsyncServerCursor( + async with gaussdb.AsyncServerCursor( aconn, "baz", row_factory=rows.namedtuple_row ) as cur: assert cur.name == "baz" @@ -42,11 +42,11 @@ async def test_init_row_factory(aconn): async def test_init_params(aconn): - async with psycopg.AsyncServerCursor(aconn, "foo") as cur: + async with gaussdb.AsyncServerCursor(aconn, "foo") as cur: assert cur.scrollable is None assert cur.withhold is False - async with psycopg.AsyncServerCursor( + async with gaussdb.AsyncServerCursor( aconn, "bar", withhold=True, scrollable=False ) as cur: assert cur.scrollable is False @@ -64,7 +64,7 @@ async def test_funny_name(aconn): async def test_repr(aconn): cur = aconn.cursor("my-name") - assert "psycopg.%s" % aconn.server_cursor_factory.__name__ in str(cur) + assert "gaussdb.%s" % aconn.server_cursor_factory.__name__ in str(cur) assert "my-name" in repr(cur) await cur.close() diff --git a/tests/test_dns.py b/tests/test_dns.py index ded4f8408..ebf2a9b06 100644 --- a/tests/test_dns.py +++ b/tests/test_dns.py @@ -1,7 +1,7 @@ import pytest -import psycopg -from psycopg.conninfo import conninfo_to_dict +import gaussdb +from gaussdb.conninfo import conninfo_to_dict @pytest.mark.dns @@ -10,7 +10,7 @@ async def test_resolve_hostaddr_async_warning(recwarn): import_dnspython() conninfo = "dbname=foo" params = conninfo_to_dict(conninfo) - params = await psycopg._dns.resolve_hostaddr_async( # type: ignore[attr-defined] + params = await gaussdb._dns.resolve_hostaddr_async( # type: ignore[attr-defined] params ) assert "resolve_hostaddr_async" in str(recwarn.pop(DeprecationWarning).message) @@ -22,4 +22,4 @@ def import_dnspython(): except ImportError: pytest.skip("dnspython package not available") - import psycopg._dns # noqa: F401 + import gaussdb._dns # noqa: F401 diff --git a/tests/test_dns_srv.py b/tests/test_dns_srv.py index d41d1405e..4f43ddb8f 100644 --- a/tests/test_dns_srv.py +++ b/tests/test_dns_srv.py @@ -2,8 +2,8 @@ import pytest -import psycopg -from psycopg.conninfo import conninfo_to_dict +import gaussdb +from gaussdb.conninfo import conninfo_to_dict from .test_dns import import_dnspython @@ -48,7 +48,7 @@ def test_srv(conninfo, want, env, fake_srv, setpgenv): setpgenv(env) params = conninfo_to_dict(conninfo) - params = psycopg._dns.resolve_srv(params) # type: ignore[attr-defined] + params = gaussdb._dns.resolve_srv(params) # type: ignore[attr-defined] assert conninfo_to_dict(want) == params @@ -57,7 +57,7 @@ def test_srv(conninfo, want, env, fake_srv, setpgenv): async def test_srv_async(conninfo, want, env, afake_srv, setpgenv): setpgenv(env) params = conninfo_to_dict(conninfo) - params = await psycopg._dns.resolve_srv_async(params) # type: ignore[attr-defined] + params = await gaussdb._dns.resolve_srv_async(params) # type: ignore[attr-defined] assert conninfo_to_dict(want) == params @@ 
-71,8 +71,8 @@ async def test_srv_async(conninfo, want, env, afake_srv, setpgenv): def test_srv_bad(conninfo, env, fake_srv, setpgenv): setpgenv(env) params = conninfo_to_dict(conninfo) - with pytest.raises(psycopg.OperationalError): - psycopg._dns.resolve_srv(params) # type: ignore[attr-defined] + with pytest.raises(gaussdb.OperationalError): + gaussdb._dns.resolve_srv(params) # type: ignore[attr-defined] @pytest.mark.anyio @@ -80,15 +80,15 @@ def test_srv_bad(conninfo, env, fake_srv, setpgenv): async def test_srv_bad_async(conninfo, env, afake_srv, setpgenv): setpgenv(env) params = conninfo_to_dict(conninfo) - with pytest.raises(psycopg.OperationalError): - await psycopg._dns.resolve_srv_async(params) # type: ignore[attr-defined] + with pytest.raises(gaussdb.OperationalError): + await gaussdb._dns.resolve_srv_async(params) # type: ignore[attr-defined] @pytest.fixture def fake_srv(monkeypatch): f = get_fake_srv_function(monkeypatch) monkeypatch.setattr( - psycopg._dns.resolver, # type: ignore[attr-defined] + gaussdb._dns.resolver, # type: ignore[attr-defined] "resolve", f, ) @@ -102,7 +102,7 @@ async def af(qname, rdtype): return f(qname, rdtype) monkeypatch.setattr( - psycopg._dns.async_resolver, # type: ignore[attr-defined] + gaussdb._dns.async_resolver, # type: ignore[attr-defined] "resolve", af, ) diff --git a/tests/test_encodings.py b/tests/test_encodings.py index 0b6f10f6a..9256fecab 100644 --- a/tests/test_encodings.py +++ b/tests/test_encodings.py @@ -2,8 +2,8 @@ import pytest -import psycopg -from psycopg import _encodings as encodings +import gaussdb +from gaussdb import _encodings as encodings def test_names_normalised(): @@ -40,7 +40,7 @@ def test_pg2py(pyenc, pgenc): @pytest.mark.parametrize("pgenc", ["MULE_INTERNAL", "EUC_TW"]) def test_pg2py_missing(pgenc): - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): encodings.pg2pyenc(pgenc.encode()) diff --git a/tests/test_errors.py b/tests/test_errors.py index 1dc8a6c23..7f5ac57a0 100644 --- a/tests/test_errors.py +++ b/tests/test_errors.py @@ -7,9 +7,9 @@ import pytest -import psycopg -from psycopg import errors as e -from psycopg import pq +import gaussdb +from gaussdb import errors as e +from gaussdb import pq from .utils import eur from .fix_crdb import is_crdb @@ -295,7 +295,7 @@ def test_unknown_sqlstate(conn): def test_pgconn_error(conn_cls): - with pytest.raises(psycopg.OperationalError) as excinfo: + with pytest.raises(gaussdb.OperationalError) as excinfo: conn_cls.connect("dbname=nosuchdb") exc = excinfo.value @@ -304,7 +304,7 @@ def test_pgconn_error(conn_cls): def test_pgconn_error_pickle(conn_cls): - with pytest.raises(psycopg.OperationalError) as excinfo: + with pytest.raises(gaussdb.OperationalError) as excinfo: conn_cls.connect("dbname=nosuchdb") exc = pickle.loads(pickle.dumps(excinfo.value)) @@ -341,7 +341,7 @@ def test_blank_sqlstate(conn): ], ) def test_strip_severity_unstripped(msg): - from psycopg.pq.misc import strip_severity + from gaussdb.pq.misc import strip_severity out = strip_severity(msg) assert out == msg.strip() @@ -356,7 +356,7 @@ def test_strip_severity_unstripped(msg): ], ) def test_strip_severity_l10n(msg): - from psycopg.pq.misc import strip_severity + from gaussdb.pq.misc import strip_severity out = strip_severity(msg) assert out == msg.split(":", 1)[1].strip() diff --git a/tests/test_psycopg_dbapi20.py b/tests/test_gaussdb_dbapi20.py similarity index 88% rename from tests/test_psycopg_dbapi20.py rename to tests/test_gaussdb_dbapi20.py index 
768b98f8b..2e0b599cc 100644 --- a/tests/test_psycopg_dbapi20.py +++ b/tests/test_gaussdb_dbapi20.py @@ -5,8 +5,8 @@ import pytest -import psycopg -from psycopg.conninfo import conninfo_to_dict +import gaussdb +from gaussdb.conninfo import conninfo_to_dict from . import dbapi20, dbapi20_tpc @@ -18,7 +18,7 @@ def with_dsn(request, session_dsn): @pytest.mark.usefixtures("with_dsn") class PsycopgTests(dbapi20.DatabaseAPI20Test): - driver = psycopg + driver = gaussdb # connect_args = () # set by the fixture connect_kw_args: dict[Any, Any] = {} @@ -34,11 +34,11 @@ def test_setoutputsize(self): @pytest.mark.usefixtures("tpc") @pytest.mark.usefixtures("with_dsn") class PsycopgTPCTests(dbapi20_tpc.TwoPhaseCommitTests): - driver = psycopg + driver = gaussdb connect_args = () # set by the fixture def connect(self): - return psycopg.connect(*self.connect_args) + return gaussdb.connect(*self.connect_args) # Shut up warnings @@ -70,7 +70,7 @@ def connect(self): ], ) def test_singletons(conn, typename, singleton): - singleton = getattr(psycopg, singleton) + singleton = getattr(gaussdb, singleton) cur = conn.cursor() cur.execute(f"select null::{typename}") oid = cur.description[0].type_code @@ -88,7 +88,7 @@ def test_singletons(conn, typename, singleton): ], ) def test_timestamp_from_ticks(ticks, want): - s = psycopg.TimestampFromTicks(ticks) + s = gaussdb.TimestampFromTicks(ticks) want = dt.datetime.strptime(want, "%Y-%m-%dT%H:%M:%S.%f%z") assert s == want @@ -102,7 +102,7 @@ def test_timestamp_from_ticks(ticks, want): ], ) def test_date_from_ticks(ticks, want): - s = psycopg.DateFromTicks(ticks) + s = gaussdb.DateFromTicks(ticks) if isinstance(want, str): want = [want] want = [dt.datetime.strptime(w, "%Y-%m-%d").date() for w in want] @@ -114,7 +114,7 @@ def test_date_from_ticks(ticks, want): [(0, "00:00:00.000000"), (1273173119.99992, "00:11:59.999920")], ) def test_time_from_ticks(ticks, want): - s = psycopg.TimeFromTicks(ticks) + s = gaussdb.TimeFromTicks(ticks) want = dt.datetime.strptime(want, "%H:%M:%S.%f").time() assert s.replace(hour=0) == want @@ -144,8 +144,8 @@ def fake_connect(conninfo, *, timeout=0.0): yield setpgenv({}) - monkeypatch.setattr(psycopg.generators, "connect", fake_connect) - conn = psycopg.connect(*args, **kwargs) + monkeypatch.setattr(gaussdb.generators, "connect", fake_connect) + conn = gaussdb.connect(*args, **kwargs) assert conninfo_to_dict(got_conninfo) == conninfo_to_dict(want) conn.close() @@ -155,9 +155,9 @@ def fake_connect(conninfo, *, timeout=0.0): [ (("host=foo", "host=bar"), {}, TypeError), (("", ""), {}, TypeError), - ((), {"nosuchparam": 42}, psycopg.ProgrammingError), + ((), {"nosuchparam": 42}, gaussdb.ProgrammingError), ], ) def test_connect_badargs(monkeypatch, pgconn, args, kwargs, exctype): with pytest.raises(exctype): - psycopg.connect(*args, **kwargs) + gaussdb.connect(*args, **kwargs) diff --git a/tests/test_generators.py b/tests/test_generators.py index 412a770e6..89ee9e134 100644 --- a/tests/test_generators.py +++ b/tests/test_generators.py @@ -4,9 +4,9 @@ import pytest -import psycopg -from psycopg import pq, waiting -from psycopg.conninfo import conninfo_to_dict, make_conninfo +import gaussdb +from gaussdb import pq, waiting +from gaussdb.conninfo import conninfo_to_dict, make_conninfo def test_connect_operationalerror_pgconn(generators, dsn, monkeypatch): @@ -28,7 +28,7 @@ def test_connect_operationalerror_pgconn(generators, dsn, monkeypatch): gen = generators.connect(dsn) with pytest.raises( - psycopg.OperationalError, match="connection failed:" + 
gaussdb.OperationalError, match="connection failed:" ) as excinfo: waiting.wait_conn(gen) @@ -39,7 +39,7 @@ def test_connect_operationalerror_pgconn(generators, dsn, monkeypatch): assert pgconn.status == pq.ConnStatus.BAD.value assert pgconn.transaction_status == pq.TransactionStatus.UNKNOWN.value assert pgconn.pipeline_status == pq.PipelineStatus.OFF.value - with pytest.raises(psycopg.OperationalError, match="connection is closed"): + with pytest.raises(gaussdb.OperationalError, match="connection is closed"): pgconn.exec_(b"select 1") @@ -173,8 +173,8 @@ def test_pipeline_communicate_abort(pgconn, pipeline_demo, pipeline, generators) @pytest.fixture def pipeline_uniqviol(pgconn): try: - psycopg.capabilities.has_pipeline(check=True) - except psycopg.NotSupportedError as ex: + gaussdb.capabilities.has_pipeline(check=True) + except gaussdb.NotSupportedError as ex: pytest.skip(str(ex)) assert pgconn.pipeline_status == 0 res = pgconn.exec_(b"DROP TABLE IF EXISTS pg_pipeline_uniqviol") diff --git a/tests/test_gevent.py b/tests/test_gevent.py index 68df41314..ef0909578 100644 --- a/tests/test_gevent.py +++ b/tests/test_gevent.py @@ -4,7 +4,7 @@ import pytest -import psycopg +import gaussdb pytest.importorskip("gevent") @@ -22,7 +22,7 @@ def test_gevent(dsn): import json import time import gevent -import psycopg +import gaussdb TICK = {TICK!r} dts = [] @@ -38,7 +38,7 @@ def ticker(): def querier(): time.sleep(TICK * 2) - with psycopg.connect({dsn!r}) as conn: + with gaussdb.connect({dsn!r}) as conn: conn.execute("select pg_sleep(0.3)") global queried @@ -56,30 +56,30 @@ def querier(): assert TICK <= dt < TICK * 1.1 -@pytest.mark.skipif("not psycopg._cmodule._psycopg") +@pytest.mark.skipif("not gaussdb._cmodule._gaussdb") def test_patched_dont_use_wait_c(): - if psycopg.waiting.wait is not psycopg.waiting.wait_c: + if gaussdb.waiting.wait is not gaussdb.waiting.wait_c: pytest.skip("wait_c not normally in use") script = """ import gevent.monkey gevent.monkey.patch_all() -import psycopg -assert psycopg.waiting.wait is not psycopg.waiting.wait_c +import gaussdb +assert gaussdb.waiting.wait is not gaussdb.waiting.wait_c """ sp.check_call([sys.executable, "-c", script]) -@pytest.mark.skipif("not psycopg._cmodule._psycopg") +@pytest.mark.skipif("not gaussdb._cmodule._gaussdb") def test_unpatched_still_use_wait_c(): - if psycopg.waiting.wait is not psycopg.waiting.wait_c: + if gaussdb.waiting.wait is not gaussdb.waiting.wait_c: pytest.skip("wait_c not normally in use") script = """ import gevent.monkey -import psycopg -assert psycopg.waiting.wait is psycopg.waiting.wait_c +import gaussdb +assert gaussdb.waiting.wait is gaussdb.waiting.wait_c """ sp.check_call([sys.executable, "-c", script]) diff --git a/tests/test_module.py b/tests/test_module.py index 2b1869e94..87b6ecfac 100644 --- a/tests/test_module.py +++ b/tests/test_module.py @@ -1,7 +1,7 @@ import pytest -from psycopg._cmodule import _psycopg -from psycopg.conninfo import conninfo_to_dict +from gaussdb._cmodule import _gaussdb +from gaussdb.conninfo import conninfo_to_dict @pytest.mark.parametrize( @@ -14,11 +14,11 @@ ], ) def test_connect(monkeypatch, dsn_env, args, kwargs, want, setpgenv): - # Check the main args passing from psycopg.connect to the conn generator + # Check the main args passing from gaussdb.connect to the conn generator # Details of the params manipulation are in test_conninfo. 
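The surrounding test checks how gaussdb.connect() flattens its positional and keyword arguments into a single conninfo string before handing it to the connection generator. The conninfo helpers it relies on can be exercised standalone; a sketch, assuming gaussdb keeps psycopg 3's conninfo module as imported above:

from gaussdb.conninfo import conninfo_to_dict, make_conninfo

# A DSN string and a dict of parameters are two views of the same thing.
params = conninfo_to_dict("host=127.0.0.1 port=5432 dbname=test")
assert params == {"host": "127.0.0.1", "port": "5432", "dbname": "test"}

# make_conninfo merges keyword overrides back into a single DSN string.
print(make_conninfo("host=127.0.0.1 dbname=test", port=5432))
# roughly: host=127.0.0.1 dbname=test port=5432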
- import psycopg.connection + import gaussdb.connection - orig_connect = psycopg.generators.connect + orig_connect = gaussdb.generators.connect got_conninfo: str @@ -28,9 +28,9 @@ def mock_connect(conninfo, *, timeout): return orig_connect(dsn_env, timeout=timeout) setpgenv({}) - monkeypatch.setattr(psycopg.generators, "connect", mock_connect) + monkeypatch.setattr(gaussdb.generators, "connect", mock_connect) - conn = psycopg.connect(*args, **kwargs) + conn = gaussdb.connect(*args, **kwargs) assert conninfo_to_dict(got_conninfo) == conninfo_to_dict(want) conn.close() @@ -38,17 +38,17 @@ def mock_connect(conninfo, *, timeout): def test_version(mypy): cp = mypy.run_on_source( """\ -from psycopg import __version__ +from gaussdb import __version__ assert __version__ """ ) assert not cp.stdout -@pytest.mark.skipif(_psycopg is None, reason="C module test") +@pytest.mark.skipif(_gaussdb is None, reason="C module test") def test_version_c(mypy): - # can be psycopg_c, psycopg_binary - cpackage = _psycopg.__name__.split(".")[0] + # can be gaussdb_c, gaussdb_binary + cpackage = _gaussdb.__name__.split(".")[0] cp = mypy.run_on_source( f"""\ diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index 7a012aaee..f22b0d4f1 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -10,22 +10,22 @@ import pytest -import psycopg -from psycopg import errors as e -from psycopg import pq +import gaussdb +from gaussdb import errors as e +from gaussdb import pq from .acompat import is_async pytestmark = [ pytest.mark.pipeline, - pytest.mark.skipif("not psycopg.Pipeline.is_supported()"), + pytest.mark.skipif("not gaussdb.Pipeline.is_supported()"), ] pipeline_aborted = pytest.mark.flakey("the server might get in pipeline aborted") def test_repr(conn): with conn.pipeline() as p: - name = "psycopg.AsyncPipeline" if is_async(conn) else "psycopg.Pipeline" + name = "gaussdb.AsyncPipeline" if is_async(conn) else "gaussdb.Pipeline" assert name in repr(p) assert "[IDLE, pipeline=ON]" in repr(p) @@ -40,7 +40,7 @@ def test_connection_closed(conn): pass -def test_pipeline_status(conn: psycopg.Connection[Any]) -> None: +def test_pipeline_status(conn: gaussdb.Connection[Any]) -> None: assert conn._pipeline is None with conn.pipeline() as p: assert conn._pipeline is p @@ -49,7 +49,7 @@ def test_pipeline_status(conn: psycopg.Connection[Any]) -> None: assert not conn._pipeline -def test_pipeline_reenter(conn: psycopg.Connection[Any]) -> None: +def test_pipeline_reenter(conn: gaussdb.Connection[Any]) -> None: with conn.pipeline() as p1: with conn.pipeline() as p2: assert p2 is p1 @@ -60,7 +60,7 @@ def test_pipeline_reenter(conn: psycopg.Connection[Any]) -> None: assert p1.status == pq.PipelineStatus.OFF # type: ignore[comparison-overlap] -def test_pipeline_broken_conn_exit(conn: psycopg.Connection[Any]) -> None: +def test_pipeline_broken_conn_exit(conn: gaussdb.Connection[Any]) -> None: with pytest.raises(e.OperationalError): with conn.pipeline(): conn.execute("select 1") @@ -71,7 +71,7 @@ def test_pipeline_broken_conn_exit(conn: psycopg.Connection[Any]) -> None: def test_pipeline_exit_error_noclobber(conn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg") + caplog.set_level(logging.WARNING, logger="gaussdb") with pytest.raises(ZeroDivisionError): with conn.pipeline(): conn.close() @@ -81,7 +81,7 @@ def test_pipeline_exit_error_noclobber(conn, caplog): def test_pipeline_exit_error_noclobber_nested(conn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg") + caplog.set_level(logging.WARNING, 
logger="gaussdb") with pytest.raises(ZeroDivisionError): with conn.pipeline(): with conn.pipeline(): @@ -110,13 +110,13 @@ def test_pipeline_nested_sync_trace(conn, trace): def test_cursor_stream(conn): with conn.pipeline(), conn.cursor() as cur: - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): next(cur.stream("select 1")) def test_server_cursor(conn): with conn.cursor(name="pipeline") as cur, conn.pipeline(): - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): cur.execute("select 1") @@ -439,11 +439,11 @@ def test_prepare_error(conn): """ conn.set_autocommit(True) stmt = "INSERT INTO nosuchtable(data) VALUES (%s)" - with pytest.raises(psycopg.errors.UndefinedTable): + with pytest.raises(gaussdb.errors.UndefinedTable): with conn.pipeline(): conn.execute(stmt, ["foo"], prepare=True) assert not conn._prepared._names - with pytest.raises(psycopg.errors.UndefinedTable): + with pytest.raises(gaussdb.errors.UndefinedTable): conn.execute(stmt, ["bar"]) @@ -460,7 +460,7 @@ def test_transaction(conn): with conn.transaction(): cur = conn.execute("select 'rb'") - raise psycopg.Rollback() + raise gaussdb.Rollback() (r,) = cur.fetchone() assert r == "rb" @@ -535,7 +535,7 @@ def test_rollback_transaction(conn): def test_message_0x33(conn): - # https://github.com/psycopg/psycopg/issues/314 + # https://github.com/gaussdb/gaussdb/issues/314 notices = [] conn.add_notice_handler(lambda diag: notices.append(diag.message_primary)) diff --git a/tests/test_pipeline_async.py b/tests/test_pipeline_async.py index 56a76bfef..3d87e6896 100644 --- a/tests/test_pipeline_async.py +++ b/tests/test_pipeline_async.py @@ -7,22 +7,22 @@ import pytest -import psycopg -from psycopg import errors as e -from psycopg import pq +import gaussdb +from gaussdb import errors as e +from gaussdb import pq from .acompat import anext, is_async pytestmark = [ pytest.mark.pipeline, - pytest.mark.skipif("not psycopg.Pipeline.is_supported()"), + pytest.mark.skipif("not gaussdb.Pipeline.is_supported()"), ] pipeline_aborted = pytest.mark.flakey("the server might get in pipeline aborted") async def test_repr(aconn): async with aconn.pipeline() as p: - name = "psycopg.AsyncPipeline" if is_async(aconn) else "psycopg.Pipeline" + name = "gaussdb.AsyncPipeline" if is_async(aconn) else "gaussdb.Pipeline" assert name in repr(p) assert "[IDLE, pipeline=ON]" in repr(p) @@ -37,7 +37,7 @@ async def test_connection_closed(aconn): pass -async def test_pipeline_status(aconn: psycopg.AsyncConnection[Any]) -> None: +async def test_pipeline_status(aconn: gaussdb.AsyncConnection[Any]) -> None: assert aconn._pipeline is None async with aconn.pipeline() as p: assert aconn._pipeline is p @@ -46,7 +46,7 @@ async def test_pipeline_status(aconn: psycopg.AsyncConnection[Any]) -> None: assert not aconn._pipeline -async def test_pipeline_reenter(aconn: psycopg.AsyncConnection[Any]) -> None: +async def test_pipeline_reenter(aconn: gaussdb.AsyncConnection[Any]) -> None: async with aconn.pipeline() as p1: async with aconn.pipeline() as p2: assert p2 is p1 @@ -57,7 +57,7 @@ async def test_pipeline_reenter(aconn: psycopg.AsyncConnection[Any]) -> None: assert p1.status == pq.PipelineStatus.OFF # type: ignore[comparison-overlap] -async def test_pipeline_broken_conn_exit(aconn: psycopg.AsyncConnection[Any]) -> None: +async def test_pipeline_broken_conn_exit(aconn: gaussdb.AsyncConnection[Any]) -> None: with pytest.raises(e.OperationalError): async with aconn.pipeline(): await 
aconn.execute("select 1") @@ -68,7 +68,7 @@ async def test_pipeline_broken_conn_exit(aconn: psycopg.AsyncConnection[Any]) -> async def test_pipeline_exit_error_noclobber(aconn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg") + caplog.set_level(logging.WARNING, logger="gaussdb") with pytest.raises(ZeroDivisionError): async with aconn.pipeline(): await aconn.close() @@ -78,7 +78,7 @@ async def test_pipeline_exit_error_noclobber(aconn, caplog): async def test_pipeline_exit_error_noclobber_nested(aconn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg") + caplog.set_level(logging.WARNING, logger="gaussdb") with pytest.raises(ZeroDivisionError): async with aconn.pipeline(): async with aconn.pipeline(): @@ -107,13 +107,13 @@ async def test_pipeline_nested_sync_trace(aconn, trace): async def test_cursor_stream(aconn): async with aconn.pipeline(), aconn.cursor() as cur: - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): await anext(cur.stream("select 1")) async def test_server_cursor(aconn): async with aconn.cursor(name="pipeline") as cur, aconn.pipeline(): - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): await cur.execute("select 1") @@ -438,11 +438,11 @@ async def test_prepare_error(aconn): """ await aconn.set_autocommit(True) stmt = "INSERT INTO nosuchtable(data) VALUES (%s)" - with pytest.raises(psycopg.errors.UndefinedTable): + with pytest.raises(gaussdb.errors.UndefinedTable): async with aconn.pipeline(): await aconn.execute(stmt, ["foo"], prepare=True) assert not aconn._prepared._names - with pytest.raises(psycopg.errors.UndefinedTable): + with pytest.raises(gaussdb.errors.UndefinedTable): await aconn.execute(stmt, ["bar"]) @@ -459,7 +459,7 @@ async def test_transaction(aconn): async with aconn.transaction(): cur = await aconn.execute("select 'rb'") - raise psycopg.Rollback() + raise gaussdb.Rollback() (r,) = await cur.fetchone() assert r == "rb" @@ -536,7 +536,7 @@ async def test_rollback_transaction(aconn): async def test_message_0x33(aconn): - # https://github.com/psycopg/psycopg/issues/314 + # https://github.com/gaussdb/gaussdb/issues/314 notices = [] aconn.add_notice_handler(lambda diag: notices.append(diag.message_primary)) diff --git a/tests/test_prepared.py b/tests/test_prepared.py index 25faf27eb..e7e6faf52 100644 --- a/tests/test_prepared.py +++ b/tests/test_prepared.py @@ -12,9 +12,9 @@ import pytest -import psycopg -from psycopg.rows import namedtuple_row -from psycopg.pq._debug import PGconnDebug +import gaussdb +from gaussdb.rows import namedtuple_row +from gaussdb.pq._debug import PGconnDebug @pytest.mark.parametrize("value", [None, 0, 3]) @@ -181,10 +181,10 @@ def test_evict_lru_deallocate(conn): assert got == [f"select {i}" for i in ["'a'", 6, 7, 8, 9]] -@pytest.mark.skipif("psycopg._cmodule._psycopg", reason="Python-only debug conn") +@pytest.mark.skipif("gaussdb._cmodule._gaussdb", reason="Python-only debug conn") def test_deallocate_or_close(conn, caplog): conn.pgconn = PGconnDebug(conn.pgconn) - caplog.set_level(logging.INFO, logger="psycopg.debug") + caplog.set_level(logging.INFO, logger="gaussdb.debug") conn.set_autocommit(True) conn.prepare_threshold = 0 @@ -194,7 +194,7 @@ def test_deallocate_or_close(conn, caplog): conn.execute("select 1::text") msgs = "\n".join((rec.message for rec in caplog.records)) - if psycopg.pq.__build_version__ >= 170000: + if gaussdb.pq.__build_version__ >= 170000: assert "PGconn.send_close_prepared" in msgs assert 
"DEALLOCATE" not in msgs else: diff --git a/tests/test_prepared_async.py b/tests/test_prepared_async.py index aa1cebf35..d5bb60736 100644 --- a/tests/test_prepared_async.py +++ b/tests/test_prepared_async.py @@ -9,9 +9,9 @@ import pytest -import psycopg -from psycopg.rows import namedtuple_row -from psycopg.pq._debug import PGconnDebug +import gaussdb +from gaussdb.rows import namedtuple_row +from gaussdb.pq._debug import PGconnDebug @pytest.mark.parametrize("value", [None, 0, 3]) @@ -180,10 +180,10 @@ async def test_evict_lru_deallocate(aconn): assert got == [f"select {i}" for i in ["'a'", 6, 7, 8, 9]] -@pytest.mark.skipif("psycopg._cmodule._psycopg", reason="Python-only debug conn") +@pytest.mark.skipif("gaussdb._cmodule._gaussdb", reason="Python-only debug conn") async def test_deallocate_or_close(aconn, caplog): aconn.pgconn = PGconnDebug(aconn.pgconn) - caplog.set_level(logging.INFO, logger="psycopg.debug") + caplog.set_level(logging.INFO, logger="gaussdb.debug") await aconn.set_autocommit(True) aconn.prepare_threshold = 0 @@ -193,7 +193,7 @@ async def test_deallocate_or_close(aconn, caplog): await aconn.execute("select 1::text") msgs = "\n".join(rec.message for rec in caplog.records) - if psycopg.pq.__build_version__ >= 170000: + if gaussdb.pq.__build_version__ >= 170000: assert "PGconn.send_close_prepared" in msgs assert "DEALLOCATE" not in msgs else: diff --git a/tests/test_query.py b/tests/test_query.py index 9247e521d..fa17924ea 100644 --- a/tests/test_query.py +++ b/tests/test_query.py @@ -1,9 +1,9 @@ import pytest -import psycopg -from psycopg import pq -from psycopg.adapt import PyFormat, Transformer -from psycopg._queries import PostgresQuery, _split_query +import gaussdb +from gaussdb import pq +from gaussdb.adapt import PyFormat, Transformer +from gaussdb._queries import PostgresQuery, _split_query @pytest.mark.parametrize( @@ -61,7 +61,7 @@ def test_split_query(input, want): ], ) def test_split_query_bad(input): - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): _split_query(input) @@ -158,5 +158,5 @@ def test_pq_query_badtype(query, params): ) def test_pq_query_badprog(query, params): pq = PostgresQuery(Transformer()) - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): pq.convert(query, params) diff --git a/tests/test_rows.py b/tests/test_rows.py index 79bf7fdda..b6115b66c 100644 --- a/tests/test_rows.py +++ b/tests/test_rows.py @@ -1,7 +1,7 @@ import pytest -import psycopg -from psycopg import rows +import gaussdb +from gaussdb import rows from .utils import eur @@ -108,7 +108,7 @@ def test_scalar_row(conn): assert cur.fetchone() == 1 cur.execute("select 1, 2") assert cur.fetchone() == 1 - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): cur.execute("select") @@ -119,7 +119,7 @@ def test_scalar_row(conn): def test_no_result(factory, conn): cur = conn.cursor(row_factory=factory_from_name(factory)) cur.execute("reset search_path") - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): cur.fetchone() diff --git a/tests/test_sql.py b/tests/test_sql.py index 3ca3bec5f..674bcc76e 100644 --- a/tests/test_sql.py +++ b/tests/test_sql.py @@ -1,17 +1,17 @@ -# test_sql.py - tests for the psycopg2.sql module +# test_sql.py - tests for the _GaussDB.sql module -# Copyright (C) 2020 The Psycopg Team +# Copyright (C) 2020 The GaussDB Team import re import datetime as dt import pytest -from psycopg import 
ProgrammingError, pq, sql -from psycopg.adapt import PyFormat -from psycopg.types import TypeInfo -from psycopg._encodings import py2pgenc -from psycopg.types.string import StrDumper +from gaussdb import ProgrammingError, pq, sql +from gaussdb.adapt import PyFormat +from gaussdb.types import TypeInfo +from gaussdb._encodings import py2pgenc +from gaussdb.types.string import StrDumper from .utils import eur from .fix_crdb import crdb_encoding, crdb_scs_off diff --git a/tests/test_tpc.py b/tests/test_tpc.py index 90ba2f09d..8a07452ba 100644 --- a/tests/test_tpc.py +++ b/tests/test_tpc.py @@ -3,8 +3,8 @@ # DO NOT CHANGE! Change the original file instead. import pytest -import psycopg -from psycopg.pq import TransactionStatus +import gaussdb +from gaussdb.pq import TransactionStatus pytestmark = pytest.mark.crdb_skip("2-phase commit") @@ -17,7 +17,7 @@ def test_tpc_disabled(conn, pipeline): conn.rollback() conn.tpc_begin("x") - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): conn.tpc_prepare() @@ -275,13 +275,13 @@ def test_xid_unicode_unparsed(self, conn_cls, conn, dsn, tpc): def test_cancel_fails_prepared(self, conn, tpc): conn.tpc_begin("cancel") conn.tpc_prepare() - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): conn.cancel() - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): conn.cancel_safe() def test_tpc_recover_non_dbapi_connection(self, conn_cls, conn, dsn, tpc): - conn.row_factory = psycopg.rows.dict_row + conn.row_factory = gaussdb.rows.dict_row conn.tpc_begin("dict-connection") conn.tpc_prepare() conn.close() diff --git a/tests/test_tpc_async.py b/tests/test_tpc_async.py index 8a448c714..f03758ba4 100644 --- a/tests/test_tpc_async.py +++ b/tests/test_tpc_async.py @@ -1,7 +1,7 @@ import pytest -import psycopg -from psycopg.pq import TransactionStatus +import gaussdb +from gaussdb.pq import TransactionStatus pytestmark = pytest.mark.crdb_skip("2-phase commit") @@ -14,7 +14,7 @@ async def test_tpc_disabled(aconn, apipeline): await aconn.rollback() await aconn.tpc_begin("x") - with pytest.raises(psycopg.NotSupportedError): + with pytest.raises(gaussdb.NotSupportedError): await aconn.tpc_prepare() @@ -283,13 +283,13 @@ async def test_xid_unicode_unparsed(self, aconn_cls, aconn, dsn, tpc): async def test_cancel_fails_prepared(self, aconn, tpc): await aconn.tpc_begin("cancel") await aconn.tpc_prepare() - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): aconn.cancel() - with pytest.raises(psycopg.ProgrammingError): + with pytest.raises(gaussdb.ProgrammingError): await aconn.cancel_safe() async def test_tpc_recover_non_dbapi_connection(self, aconn_cls, aconn, dsn, tpc): - aconn.row_factory = psycopg.rows.dict_row + aconn.row_factory = gaussdb.rows.dict_row await aconn.tpc_begin("dict-connection") await aconn.tpc_prepare() await aconn.close() diff --git a/tests/test_transaction.py b/tests/test_transaction.py index 0e94ff4c5..a4e032ba4 100644 --- a/tests/test_transaction.py +++ b/tests/test_transaction.py @@ -5,9 +5,9 @@ import pytest -from psycopg import Rollback -from psycopg import errors as e -from psycopg import pq +from gaussdb import Rollback +from gaussdb import errors as e +from gaussdb import pq from ._test_transaction import create_test_table # noqa # autouse fixture from ._test_transaction import ExpectedException, crdb_skip_external_observer @@ -91,7 +91,7 @@ def 
test_context_inerror_rollback_no_clobber(conn_cls, conn, pipeline, dsn, capl # Only 'aconn' is possibly in pipeline mode, but the transaction and # checks are on 'conn2'. pytest.skip("not applicable") - caplog.set_level(logging.WARNING, logger="psycopg") + caplog.set_level(logging.WARNING, logger="gaussdb") with pytest.raises(ZeroDivisionError): with conn_cls.connect(dsn) as conn2: @@ -110,7 +110,7 @@ def test_context_inerror_rollback_no_clobber(conn_cls, conn, pipeline, dsn, capl @pytest.mark.crdb_skip("copy") def test_context_active_rollback_no_clobber(conn_cls, dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg") + caplog.set_level(logging.WARNING, logger="gaussdb") conn = conn_cls.connect(dsn) try: diff --git a/tests/test_transaction_async.py b/tests/test_transaction_async.py index a2ad4765b..8cc271b07 100644 --- a/tests/test_transaction_async.py +++ b/tests/test_transaction_async.py @@ -2,9 +2,9 @@ import pytest -from psycopg import Rollback -from psycopg import errors as e -from psycopg import pq +from gaussdb import Rollback +from gaussdb import errors as e +from gaussdb import pq from ._test_transaction import create_test_table # noqa # autouse fixture from ._test_transaction import ExpectedException, crdb_skip_external_observer @@ -90,7 +90,7 @@ async def test_context_inerror_rollback_no_clobber( # Only 'aconn' is possibly in pipeline mode, but the transaction and # checks are on 'conn2'. pytest.skip("not applicable") - caplog.set_level(logging.WARNING, logger="psycopg") + caplog.set_level(logging.WARNING, logger="gaussdb") with pytest.raises(ZeroDivisionError): async with await aconn_cls.connect(dsn) as conn2: @@ -110,7 +110,7 @@ async def test_context_inerror_rollback_no_clobber( @pytest.mark.crdb_skip("copy") async def test_context_active_rollback_no_clobber(aconn_cls, dsn, caplog): - caplog.set_level(logging.WARNING, logger="psycopg") + caplog.set_level(logging.WARNING, logger="gaussdb") conn = await aconn_cls.connect(dsn) try: diff --git a/tests/test_typeinfo.py b/tests/test_typeinfo.py index 3301b881d..5bfc8b8d3 100644 --- a/tests/test_typeinfo.py +++ b/tests/test_typeinfo.py @@ -1,12 +1,12 @@ import pytest -import psycopg -from psycopg import sql -from psycopg.pq import TransactionStatus -from psycopg.types import TypeInfo -from psycopg.types.enum import EnumInfo -from psycopg.types.range import RangeInfo -from psycopg.types.composite import CompositeInfo +import gaussdb +from gaussdb import sql +from gaussdb.pq import TransactionStatus +from gaussdb.types import TypeInfo +from gaussdb.types.enum import EnumInfo +from gaussdb.types.range import RangeInfo +from gaussdb.types.composite import CompositeInfo from .fix_crdb import crdb_encoding @@ -36,8 +36,8 @@ def test_fetch(conn, name, status, encoding): # TODO: add the schema? 
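These TypeInfo tests revolve around fetching a type's catalog description from the server and registering it with a connection. In outline the workflow looks like this (a sketch, assuming gaussdb preserves psycopg 3's TypeInfo API and that a test server is reachable at the placeholder DSN):

import gaussdb
from gaussdb.types import TypeInfo

with gaussdb.connect("dbname=test") as conn:  # placeholder DSN
    info = TypeInfo.fetch(conn, "text")
    # register() on a connection scopes the type to that connection's
    # adapters; calling it with no argument would register it globally.
    info.register(conn)
    print(info.oid, info.array_oid, info.regtype)  # e.g. 25 1009 text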
# assert info.schema == "pg_catalog" - assert info.oid == psycopg.adapters.types["text"].oid - assert info.array_oid == psycopg.adapters.types["text"].array_oid + assert info.oid == gaussdb.adapters.types["text"].oid + assert info.array_oid == gaussdb.adapters.types["text"].array_oid assert info.regtype == "text" @@ -66,8 +66,8 @@ async def test_fetch_async(aconn, name, status, encoding): assert info.name == "text" # assert info.schema == "pg_catalog" - assert info.oid == psycopg.adapters.types["text"].oid - assert info.array_oid == psycopg.adapters.types["text"].array_oid + assert info.oid == gaussdb.adapters.types["text"].oid + assert info.array_oid == gaussdb.adapters.types["text"].array_oid _name = pytest.mark.parametrize("name", ["nosuch", sql.Identifier("nosuch")]) @@ -88,13 +88,13 @@ async def test_fetch_async(aconn, name, status, encoding): @_info_cls def test_fetch_not_found(conn, name, status, info_cls, monkeypatch): if TypeInfo._has_to_regtype_function(conn): - exit_orig = psycopg.Transaction.__exit__ + exit_orig = gaussdb.Transaction.__exit__ def exit(self, exc_type, exc_val, exc_tb): assert exc_val is None return exit_orig(self, exc_type, exc_val, exc_tb) - monkeypatch.setattr(psycopg.Transaction, "__exit__", exit) + monkeypatch.setattr(gaussdb.Transaction, "__exit__", exit) status = getattr(TransactionStatus, status) if status == TransactionStatus.INTRANS: conn.execute("select 1") @@ -110,13 +110,13 @@ def exit(self, exc_type, exc_val, exc_tb): @_info_cls async def test_fetch_not_found_async(aconn, name, status, info_cls, monkeypatch): if TypeInfo._has_to_regtype_function(aconn): - exit_orig = psycopg.AsyncTransaction.__aexit__ + exit_orig = gaussdb.AsyncTransaction.__aexit__ async def aexit(self, exc_type, exc_val, exc_tb): assert exc_val is None return await exit_orig(self, exc_type, exc_val, exc_tb) - monkeypatch.setattr(psycopg.AsyncTransaction, "__aexit__", aexit) + monkeypatch.setattr(gaussdb.AsyncTransaction, "__aexit__", aexit) status = getattr(TransactionStatus, status) if status == TransactionStatus.INTRANS: await aconn.execute("select 1") @@ -163,13 +163,13 @@ def test_fetch_by_schema_qualified_string(conn, name): ], ) def test_registry_by_builtin_name(conn, name): - info = psycopg.adapters.types[name] + info = gaussdb.adapters.types[name] assert info.name == "text" assert info.oid == 25 def test_registry_empty(): - r = psycopg.types.TypesRegistry() + r = gaussdb.types.TypesRegistry() assert r.get("text") is None with pytest.raises(KeyError): r["text"] @@ -177,8 +177,8 @@ def test_registry_empty(): @pytest.mark.parametrize("oid, aoid", [(1, 2), (1, 0), (0, 2), (0, 0)]) def test_registry_invalid_oid(oid, aoid): - r = psycopg.types.TypesRegistry() - ti = psycopg.types.TypeInfo("test", oid, aoid) + r = gaussdb.types.TypesRegistry() + ti = gaussdb.types.TypeInfo("test", oid, aoid) r.add(ti) assert r["test"] is ti if oid: @@ -190,16 +190,16 @@ def test_registry_invalid_oid(oid, aoid): def test_registry_copy(): - r = psycopg.types.TypesRegistry(psycopg.postgres.types) + r = gaussdb.types.TypesRegistry(gaussdb.postgres.types) assert r.get("text") is r["text"] is r[25] assert r["text"].oid == 25 def test_registry_isolated(): - orig = psycopg.postgres.types + orig = gaussdb.postgres.types tinfo = orig["text"] - r = psycopg.types.TypesRegistry(orig) - tdummy = psycopg.types.TypeInfo("dummy", tinfo.oid, tinfo.array_oid) + r = gaussdb.types.TypesRegistry(orig) + tdummy = gaussdb.types.TypeInfo("dummy", tinfo.oid, tinfo.array_oid) r.add(tdummy) assert r[25] is r["dummy"] is 
tdummy assert orig[25] is r["text"] is tinfo diff --git a/tests/test_typing.py b/tests/test_typing.py index 911fca1f9..38650b37d 100644 --- a/tests/test_typing.py +++ b/tests/test_typing.py @@ -22,44 +22,44 @@ def test_typing_example(mypy, filename): "conn, type", [ ( - "psycopg.connect()", - "psycopg.Connection[Tuple[Any, ...]]", + "gaussdb.connect()", + "gaussdb.Connection[Tuple[Any, ...]]", ), ( - "psycopg.connect(row_factory=rows.tuple_row)", - "psycopg.Connection[Tuple[Any, ...]]", + "gaussdb.connect(row_factory=rows.tuple_row)", + "gaussdb.Connection[Tuple[Any, ...]]", ), ( - "psycopg.connect(row_factory=rows.dict_row)", - "psycopg.Connection[Dict[str, Any]]", + "gaussdb.connect(row_factory=rows.dict_row)", + "gaussdb.Connection[Dict[str, Any]]", ), ( - "psycopg.connect(row_factory=rows.namedtuple_row)", - "psycopg.Connection[NamedTuple]", + "gaussdb.connect(row_factory=rows.namedtuple_row)", + "gaussdb.Connection[NamedTuple]", ), ( - "psycopg.connect(row_factory=rows.class_row(Thing))", - "psycopg.Connection[Thing]", + "gaussdb.connect(row_factory=rows.class_row(Thing))", + "gaussdb.Connection[Thing]", ), ( - "psycopg.connect(row_factory=thing_row)", - "psycopg.Connection[Thing]", + "gaussdb.connect(row_factory=thing_row)", + "gaussdb.Connection[Thing]", ), ( - "psycopg.Connection.connect()", - "psycopg.Connection[Tuple[Any, ...]]", + "gaussdb.Connection.connect()", + "gaussdb.Connection[Tuple[Any, ...]]", ), ( - "psycopg.Connection.connect(row_factory=rows.dict_row)", - "psycopg.Connection[Dict[str, Any]]", + "gaussdb.Connection.connect(row_factory=rows.dict_row)", + "gaussdb.Connection[Dict[str, Any]]", ), ( - "await psycopg.AsyncConnection.connect()", - "psycopg.AsyncConnection[Tuple[Any, ...]]", + "await gaussdb.AsyncConnection.connect()", + "gaussdb.AsyncConnection[Tuple[Any, ...]]", ), ( - "await psycopg.AsyncConnection.connect(row_factory=rows.dict_row)", - "psycopg.AsyncConnection[Dict[str, Any]]", + "await gaussdb.AsyncConnection.connect(row_factory=rows.dict_row)", + "gaussdb.AsyncConnection[Dict[str, Any]]", ), ], ) @@ -72,77 +72,77 @@ def test_connection_type(conn, type, mypy): "conn, curs, type", [ ( - "psycopg.connect()", + "gaussdb.connect()", "conn.cursor()", - "psycopg.Cursor[Tuple[Any, ...]]", + "gaussdb.Cursor[Tuple[Any, ...]]", ), ( - "psycopg.connect(row_factory=rows.dict_row)", + "gaussdb.connect(row_factory=rows.dict_row)", "conn.cursor()", - "psycopg.Cursor[Dict[str, Any]]", + "gaussdb.Cursor[Dict[str, Any]]", ), ( - "psycopg.connect(row_factory=rows.dict_row)", + "gaussdb.connect(row_factory=rows.dict_row)", "conn.cursor(row_factory=rows.namedtuple_row)", - "psycopg.Cursor[NamedTuple]", + "gaussdb.Cursor[NamedTuple]", ), ( - "psycopg.connect(row_factory=rows.class_row(Thing))", + "gaussdb.connect(row_factory=rows.class_row(Thing))", "conn.cursor()", - "psycopg.Cursor[Thing]", + "gaussdb.Cursor[Thing]", ), ( - "psycopg.connect(row_factory=thing_row)", + "gaussdb.connect(row_factory=thing_row)", "conn.cursor()", - "psycopg.Cursor[Thing]", + "gaussdb.Cursor[Thing]", ), ( - "psycopg.connect()", + "gaussdb.connect()", "conn.cursor(row_factory=thing_row)", - "psycopg.Cursor[Thing]", + "gaussdb.Cursor[Thing]", ), # Async cursors ( - "await psycopg.AsyncConnection.connect()", + "await gaussdb.AsyncConnection.connect()", "conn.cursor()", - "psycopg.AsyncCursor[Tuple[Any, ...]]", + "gaussdb.AsyncCursor[Tuple[Any, ...]]", ), ( - "await psycopg.AsyncConnection.connect()", + "await gaussdb.AsyncConnection.connect()", "conn.cursor(row_factory=thing_row)", - 
"psycopg.AsyncCursor[Thing]", + "gaussdb.AsyncCursor[Thing]", ), # Server-side cursors ( - "psycopg.connect()", + "gaussdb.connect()", "conn.cursor(name='foo')", - "psycopg.ServerCursor[Tuple[Any, ...]]", + "gaussdb.ServerCursor[Tuple[Any, ...]]", ), ( - "psycopg.connect(row_factory=rows.dict_row)", + "gaussdb.connect(row_factory=rows.dict_row)", "conn.cursor(name='foo')", - "psycopg.ServerCursor[Dict[str, Any]]", + "gaussdb.ServerCursor[Dict[str, Any]]", ), ( - "psycopg.connect()", + "gaussdb.connect()", "conn.cursor(name='foo', row_factory=rows.dict_row)", - "psycopg.ServerCursor[Dict[str, Any]]", + "gaussdb.ServerCursor[Dict[str, Any]]", ), # Async server-side cursors ( - "await psycopg.AsyncConnection.connect()", + "await gaussdb.AsyncConnection.connect()", "conn.cursor(name='foo')", - "psycopg.AsyncServerCursor[Tuple[Any, ...]]", + "gaussdb.AsyncServerCursor[Tuple[Any, ...]]", ), ( - "await psycopg.AsyncConnection.connect(row_factory=rows.dict_row)", + "await gaussdb.AsyncConnection.connect(row_factory=rows.dict_row)", "conn.cursor(name='foo')", - "psycopg.AsyncServerCursor[Dict[str, Any]]", + "gaussdb.AsyncServerCursor[Dict[str, Any]]", ), ( - "await psycopg.AsyncConnection.connect()", + "await gaussdb.AsyncConnection.connect()", "conn.cursor(name='foo', row_factory=rows.dict_row)", - "psycopg.AsyncServerCursor[Dict[str, Any]]", + "gaussdb.AsyncServerCursor[Dict[str, Any]]", ), ], ) @@ -158,67 +158,67 @@ def test_cursor_type(conn, curs, type, mypy): "conn, curs, type", [ ( - "psycopg.connect()", - "psycopg.Cursor(conn)", - "psycopg.Cursor[Tuple[Any, ...]]", + "gaussdb.connect()", + "gaussdb.Cursor(conn)", + "gaussdb.Cursor[Tuple[Any, ...]]", ), ( - "psycopg.connect(row_factory=rows.dict_row)", - "psycopg.Cursor(conn)", - "psycopg.Cursor[Dict[str, Any]]", + "gaussdb.connect(row_factory=rows.dict_row)", + "gaussdb.Cursor(conn)", + "gaussdb.Cursor[Dict[str, Any]]", ), ( - "psycopg.connect(row_factory=rows.dict_row)", - "psycopg.Cursor(conn, row_factory=rows.namedtuple_row)", - "psycopg.Cursor[NamedTuple]", + "gaussdb.connect(row_factory=rows.dict_row)", + "gaussdb.Cursor(conn, row_factory=rows.namedtuple_row)", + "gaussdb.Cursor[NamedTuple]", ), # Async cursors ( - "await psycopg.AsyncConnection.connect()", - "psycopg.AsyncCursor(conn)", - "psycopg.AsyncCursor[Tuple[Any, ...]]", + "await gaussdb.AsyncConnection.connect()", + "gaussdb.AsyncCursor(conn)", + "gaussdb.AsyncCursor[Tuple[Any, ...]]", ), ( - "await psycopg.AsyncConnection.connect(row_factory=rows.dict_row)", - "psycopg.AsyncCursor(conn)", - "psycopg.AsyncCursor[Dict[str, Any]]", + "await gaussdb.AsyncConnection.connect(row_factory=rows.dict_row)", + "gaussdb.AsyncCursor(conn)", + "gaussdb.AsyncCursor[Dict[str, Any]]", ), ( - "await psycopg.AsyncConnection.connect()", - "psycopg.AsyncCursor(conn, row_factory=thing_row)", - "psycopg.AsyncCursor[Thing]", + "await gaussdb.AsyncConnection.connect()", + "gaussdb.AsyncCursor(conn, row_factory=thing_row)", + "gaussdb.AsyncCursor[Thing]", ), # Server-side cursors ( - "psycopg.connect()", - "psycopg.ServerCursor(conn, 'foo')", - "psycopg.ServerCursor[Tuple[Any, ...]]", + "gaussdb.connect()", + "gaussdb.ServerCursor(conn, 'foo')", + "gaussdb.ServerCursor[Tuple[Any, ...]]", ), ( - "psycopg.connect(row_factory=rows.dict_row)", - "psycopg.ServerCursor(conn, name='foo')", - "psycopg.ServerCursor[Dict[str, Any]]", + "gaussdb.connect(row_factory=rows.dict_row)", + "gaussdb.ServerCursor(conn, name='foo')", + "gaussdb.ServerCursor[Dict[str, Any]]", ), ( - 
"psycopg.connect(row_factory=rows.dict_row)", - "psycopg.ServerCursor(conn, 'foo', row_factory=rows.namedtuple_row)", - "psycopg.ServerCursor[NamedTuple]", + "gaussdb.connect(row_factory=rows.dict_row)", + "gaussdb.ServerCursor(conn, 'foo', row_factory=rows.namedtuple_row)", + "gaussdb.ServerCursor[NamedTuple]", ), # Async server-side cursors ( - "await psycopg.AsyncConnection.connect()", - "psycopg.AsyncServerCursor(conn, name='foo')", - "psycopg.AsyncServerCursor[Tuple[Any, ...]]", + "await gaussdb.AsyncConnection.connect()", + "gaussdb.AsyncServerCursor(conn, name='foo')", + "gaussdb.AsyncServerCursor[Tuple[Any, ...]]", ), ( - "await psycopg.AsyncConnection.connect(row_factory=rows.dict_row)", - "psycopg.AsyncServerCursor(conn, name='foo')", - "psycopg.AsyncServerCursor[Dict[str, Any]]", + "await gaussdb.AsyncConnection.connect(row_factory=rows.dict_row)", + "gaussdb.AsyncServerCursor(conn, name='foo')", + "gaussdb.AsyncServerCursor[Dict[str, Any]]", ), ( - "await psycopg.AsyncConnection.connect()", - "psycopg.AsyncServerCursor(conn, name='foo', row_factory=rows.dict_row)", - "psycopg.AsyncServerCursor[Dict[str, Any]]", + "await gaussdb.AsyncConnection.connect()", + "gaussdb.AsyncServerCursor(conn, name='foo', row_factory=rows.dict_row)", + "gaussdb.AsyncServerCursor[Dict[str, Any]]", ), ], ) @@ -254,7 +254,7 @@ def test_fetchone_type(conn_class, server_side, curs, type, mypy): if server_side: curs = curs.replace("(", "(name='foo',", 1) stmts = f"""\ -conn = {await_} psycopg.{conn_class}.connect() +conn = {await_} gaussdb.{conn_class}.connect() curs = {curs} obj = {await_} curs.fetchone() """ @@ -290,7 +290,7 @@ def test_iter_type(conn_class, server_side, curs, type, mypy): if server_side: curs = curs.replace("(", "(name='foo',", 1) stmts = f"""\ -conn = {await_}psycopg.{conn_class}.connect() +conn = {await_}gaussdb.{conn_class}.connect() curs = {curs} {async_}for obj in curs: pass @@ -323,7 +323,7 @@ def test_fetchsome_type(conn_class, server_side, curs, type, method, mypy): if server_side: curs = curs.replace("(", "(name='foo',", 1) stmts = f"""\ -conn = {await_} psycopg.{conn_class}.connect() +conn = {await_} gaussdb.{conn_class}.connect() curs = {curs} obj = {await_} curs.{method}() """ @@ -346,14 +346,14 @@ def test_cur_subclass_execute(mypy, conn_class, server_side): src = f"""\ from typing import Any, cast -import psycopg -from psycopg.rows import Row, TupleRow +import gaussdb +from gaussdb.rows import Row, TupleRow -class MyCursor(psycopg.{cur_base_class}[Row]): +class MyCursor(gaussdb.{cur_base_class}[Row]): pass {async_}def test() -> None: - conn = {await_} psycopg.{conn_class}.connect() + conn = {await_} gaussdb.{conn_class}.connect() cur: MyCursor[TupleRow] reveal_type(cur) @@ -380,15 +380,15 @@ def _test_reveal(stmts, type, mypy): from __future__ import annotations from typing import Any, Callable, Dict, NamedTuple, Sequence, Tuple -import psycopg -from psycopg import rows +import gaussdb +from gaussdb import rows class Thing: def __init__(self, **kwargs: Any) -> None: self.kwargs = kwargs def thing_row( - cur: psycopg.Cursor[Any] | psycopg.AsyncCursor[Any], + cur: gaussdb.Cursor[Any] | gaussdb.AsyncCursor[Any], ) -> Callable[[Sequence[Any]], Thing]: assert cur.description names = [d.name for d in cur.description] @@ -432,10 +432,10 @@ async def tmp() -> None: def test_generic_connect(conn, type, mypy): src = f""" from typing import Any, Dict, Tuple -import psycopg -from psycopg import rows +import gaussdb +from gaussdb import rows -class 
MyConnection(psycopg.Connection[rows.Row]): +class MyConnection(gaussdb.Connection[rows.Row]): pass obj = {conn} diff --git a/tests/test_waiting.py b/tests/test_waiting.py index 22ce74ef9..6e1cf34e3 100644 --- a/tests/test_waiting.py +++ b/tests/test_waiting.py @@ -5,9 +5,9 @@ import pytest -import psycopg -from psycopg import generators, waiting -from psycopg.pq import ConnStatus, ExecStatus +import gaussdb +from gaussdb import generators, waiting +from gaussdb.pq import ConnStatus, ExecStatus skip_if_not_linux = pytest.mark.skipif( not sys.platform.startswith("linux"), reason="non-Linux platform" @@ -23,7 +23,7 @@ "wait_epoll", marks=pytest.mark.skipif("not hasattr(select, 'epoll')") ), pytest.param("wait_poll", marks=pytest.mark.skipif("not hasattr(select, 'poll')")), - pytest.param("wait_c", marks=pytest.mark.skipif("not psycopg._cmodule._psycopg")), + pytest.param("wait_c", marks=pytest.mark.skipif("not gaussdb._cmodule._gaussdb")), ] events = ["R", "W", "RW"] @@ -40,7 +40,7 @@ def test_wait_conn(dsn, timeout): def test_wait_conn_bad(dsn): gen = generators.connect("dbname=nosuchdb") - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): waiting.wait_conn(gen) @@ -79,7 +79,7 @@ def test_wait_bad(pgconn, waitfn): pgconn.send_query(b"select 1") gen = generators.execute(pgconn) pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): waitfn(gen, pgconn.socket) @@ -127,7 +127,7 @@ def test_wait_large_fd(dsn, fname): except OSError: pytest.skip("can't open the number of files needed for the test") - pgconn = psycopg.pq.PGconn.connect(dsn.encode()) + pgconn = gaussdb.pq.PGconn.connect(dsn.encode()) try: assert pgconn.socket > 1024 pgconn.send_query(b"select 1") @@ -156,7 +156,7 @@ async def test_wait_conn_async(dsn, timeout): @pytest.mark.anyio async def test_wait_conn_async_bad(dsn): gen = generators.connect("dbname=nosuchdb") - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): await waiting.wait_conn_async(gen) @@ -190,5 +190,5 @@ async def test_wait_async_bad(pgconn): gen = generators.execute(pgconn) socket = pgconn.socket pgconn.finish() - with pytest.raises(psycopg.OperationalError): + with pytest.raises(gaussdb.OperationalError): await waiting.wait_async(gen, socket) diff --git a/tests/test_windows.py b/tests/test_windows.py index 9494765ad..709374700 100644 --- a/tests/test_windows.py +++ b/tests/test_windows.py @@ -3,7 +3,7 @@ import pytest -from psycopg.errors import InterfaceError +from gaussdb.errors import InterfaceError @pytest.mark.skipif(sys.platform != "win32", reason="windows only test") @@ -13,7 +13,7 @@ def test_windows_error(aconn_cls, dsn): async def go(): with pytest.raises( InterfaceError, - match="Psycopg cannot use the 'ProactorEventLoop'", + match="GaussDB cannot use the 'ProactorEventLoop'", ): await aconn_cls.connect(dsn) diff --git a/tests/test_xid.py b/tests/test_xid.py index 6b9bfcd81..e5d0c6e6a 100644 --- a/tests/test_xid.py +++ b/tests/test_xid.py @@ -1,27 +1,27 @@ -import psycopg +import gaussdb class TestXidObject: def test_xid_construction(self): - x1 = psycopg.Xid(74, "foo", "bar") + x1 = gaussdb.Xid(74, "foo", "bar") 74 == x1.format_id "foo" == x1.gtrid "bar" == x1.bqual def test_xid_from_string(self): - x2 = psycopg.Xid.from_string("42_Z3RyaWQ=_YnF1YWw=") + x2 = gaussdb.Xid.from_string("42_Z3RyaWQ=_YnF1YWw=") 42 == x2.format_id "gtrid" == x2.gtrid "bqual" == x2.bqual - x3 = psycopg.Xid.from_string("99_xxx_yyy") + x3 
= gaussdb.Xid.from_string("99_xxx_yyy")
         assert None is x3.format_id
         assert "99_xxx_yyy" == x3.gtrid
         assert None is x3.bqual
     def test_xid_to_string(self):
-        x1 = psycopg.Xid.from_string("42_Z3RyaWQ=_YnF1YWw=")
+        x1 = gaussdb.Xid.from_string("42_Z3RyaWQ=_YnF1YWw=")
         assert str(x1) == "42_Z3RyaWQ=_YnF1YWw="
-        x2 = psycopg.Xid.from_string("99_xxx_yyy")
+        x2 = gaussdb.Xid.from_string("99_xxx_yyy")
         assert str(x2) == "99_xxx_yyy"
diff --git a/tests/types/test_array.py b/tests/types/test_array.py
index f6656f594..45ff6ddcf 100644
--- a/tests/types/test_array.py
+++ b/tests/types/test_array.py
@@ -7,13 +7,13 @@
 import pytest
-import psycopg
-import psycopg.types.numeric
-from psycopg import pq, sql
-from psycopg.adapt import Dumper, PyFormat, Transformer
-from psycopg.types import TypeInfo
-from psycopg.postgres import types as builtins
-from psycopg.types.array import register_array
+import gaussdb
+import gaussdb.types.numeric
+from gaussdb import pq, sql
+from gaussdb.adapt import Dumper, PyFormat, Transformer
+from gaussdb.types import TypeInfo
+from gaussdb.postgres import types as builtins
+from gaussdb.types.array import register_array
 from ..test_adapt import StrNoneBinaryDumper, StrNoneDumper
@@ -124,7 +124,7 @@ def test_dump_list_int(conn, obj, want):
 )
 def test_bad_binary_array(input):
     tx = Transformer()
-    with pytest.raises(psycopg.DataError):
+    with pytest.raises(gaussdb.DataError):
         tx.get_dumper(input, PyFormat.BINARY).dump(input)
@@ -205,7 +205,7 @@ def test_numbers_array(num, type, fmt_in):
 @pytest.mark.parametrize("fmt_in", PyFormat)
 @pytest.mark.parametrize("fmt_out", pq.Format)
 def test_list_number_wrapper(conn, wrapper, fmt_in, fmt_out):
-    wrapper = getattr(psycopg.types.numeric, wrapper)
+    wrapper = getattr(gaussdb.types.numeric, wrapper)
     if wrapper is Decimal:
         want_cls = Decimal
     else:
@@ -222,10 +222,10 @@
 def test_mix_types(conn):
-    with pytest.raises(psycopg.DataError):
+    with pytest.raises(gaussdb.DataError):
         conn.execute("select %s", ([1, 0.5],))
-    with pytest.raises(psycopg.DataError):
+    with pytest.raises(gaussdb.DataError):
         conn.execute("select %s", ([1, Decimal("0.5")],))
@@ -280,7 +280,7 @@ def __init__(self, x1, y1, x2, y2):
     class BoxDumper(Dumper):
         format = pq.Format.TEXT
-        oid = psycopg.postgres.types["box"].oid
+        oid = gaussdb.postgres.types["box"].oid
         def dump(self, box):
             return ("(%s,%s),(%s,%s)" % box.coords).encode()
diff --git a/tests/types/test_bool.py b/tests/types/test_bool.py
index 5646d4e1a..f71924ad6 100644
--- a/tests/types/test_bool.py
+++ b/tests/types/test_bool.py
@@ -1,8 +1,8 @@
 import pytest
-from psycopg import pq, sql
-from psycopg.adapt import PyFormat, Transformer
-from psycopg.postgres import types as builtins
+from gaussdb import pq, sql
+from gaussdb.adapt import PyFormat, Transformer
+from gaussdb.postgres import types as builtins
 @pytest.mark.parametrize("fmt_in", PyFormat)
diff --git a/tests/types/test_composite.py b/tests/types/test_composite.py
index 37cc30463..b3c55b5dc 100644
--- a/tests/types/test_composite.py
+++ b/tests/types/test_composite.py
@@ -1,11 +1,11 @@
 import pytest
-from psycopg import postgres, pq, sql
-from psycopg.adapt import PyFormat
-from psycopg.postgres import types as builtins
-from psycopg.types.range import Range
-from psycopg.types.composite import CompositeInfo, TupleBinaryDumper, TupleDumper
-from psycopg.types.composite import register_composite
+from gaussdb import postgres, pq, sql
+from gaussdb.adapt import PyFormat
+from gaussdb.postgres import types as builtins
+from gaussdb.types.range import Range
+from gaussdb.types.composite import CompositeInfo, TupleBinaryDumper, TupleDumper
+from gaussdb.types.composite import register_composite
 from ..utils import eur
 from ..fix_crdb import crdb_skip_message, is_crdb
diff --git a/tests/types/test_datetime.py b/tests/types/test_datetime.py
index 099295e9c..0a27e61ab 100644
--- a/tests/types/test_datetime.py
+++ b/tests/types/test_datetime.py
@@ -3,8 +3,8 @@
 import pytest
-from psycopg import DataError, pq, sql
-from psycopg.adapt import PyFormat
+from gaussdb import DataError, pq, sql
+from gaussdb.adapt import PyFormat
 crdb_skip_datestyle = pytest.mark.crdb("skip", reason="set datestyle/intervalstyle")
 crdb_skip_negative_interval = pytest.mark.crdb("skip", reason="negative interval")
@@ -131,7 +131,7 @@ def test_infinity_date_example(self, conn):
         # adding binary datetime adapters
         from datetime import date
-        from psycopg.types.datetime import DateDumper, DateLoader
+        from gaussdb.types.datetime import DateDumper, DateLoader
         class InfDateDumper(DateDumper):
             def dump(self, obj):
diff --git a/tests/types/test_enum.py b/tests/types/test_enum.py
index b92fb4361..f678f87e5 100644
--- a/tests/types/test_enum.py
+++ b/tests/types/test_enum.py
@@ -2,11 +2,11 @@
 import pytest
-from psycopg import errors as e
-from psycopg import pq, sql
-from psycopg.adapt import PyFormat
-from psycopg.types import TypeInfo
-from psycopg.types.enum import EnumInfo, register_enum
+from gaussdb import errors as e
+from gaussdb import pq, sql
+from gaussdb.adapt import PyFormat
+from gaussdb.types import TypeInfo
+from gaussdb.types.enum import EnumInfo, register_enum
 from ..fix_crdb import crdb_encoding
diff --git a/tests/types/test_hstore.py b/tests/types/test_hstore.py
index 1648e2637..8c57bd067 100644
--- a/tests/types/test_hstore.py
+++ b/tests/types/test_hstore.py
@@ -1,8 +1,8 @@
 import pytest
-import psycopg
-from psycopg.types import TypeInfo
-from psycopg.types.hstore import HstoreLoader, register_hstore
+import gaussdb
+from gaussdb.types import TypeInfo
+from gaussdb.types.hstore import HstoreLoader, register_hstore
 pytestmark = pytest.mark.crdb_skip("hstore")
@@ -41,7 +41,7 @@ def test_parse_ok(s, d):
     ],
 )
 def test_parse_bad(s):
-    with pytest.raises(psycopg.DataError):
+    with pytest.raises(gaussdb.DataError):
         loader = HstoreLoader(0, None)
         loader.load(s.encode())
@@ -71,7 +71,7 @@ def test_register_curs(hstore, conn):
 def test_register_globally(conn_cls, hstore, dsn, svcconn, global_adapters):
     info = TypeInfo.fetch(svcconn, "hstore")
     register_hstore(info)
-    assert psycopg.adapters.types[info.oid].name == "hstore"
+    assert gaussdb.adapters.types[info.oid].name == "hstore"
     assert svcconn.adapters.types.get(info.oid) is None
     conn = conn_cls.connect(dsn)
diff --git a/tests/types/test_json.py b/tests/types/test_json.py
index d245877c9..0f6c5fe34 100644
--- a/tests/types/test_json.py
+++ b/tests/types/test_json.py
@@ -3,10 +3,10 @@
 import pytest
-import psycopg.types
-from psycopg import pq, sql
-from psycopg.adapt import PyFormat
-from psycopg.types.json import set_json_dumps, set_json_loads
+import gaussdb.types
+from gaussdb import pq, sql
+from gaussdb.adapt import PyFormat
+from gaussdb.types.json import set_json_dumps, set_json_loads
 samples = [
     "null",
@@ -23,7 +23,7 @@
 @pytest.mark.parametrize("wrapper", ["Json", "Jsonb"])
 @pytest.mark.parametrize("fmt_in", PyFormat)
 def test_wrapper_regtype(conn, wrapper, fmt_in):
-    wrapper = getattr(psycopg.types.json, wrapper)
+    wrapper = getattr(gaussdb.types.json, wrapper)
     cur = conn.cursor()
     cur.execute(
         f"select pg_typeof(%{fmt_in.value})::regtype = %s::regtype",
@@ -36,7 +36,7 @@ def test_wrapper_regtype(conn, wrapper, fmt_in):
 @pytest.mark.parametrize("wrapper", ["Json", "Jsonb"])
 @pytest.mark.parametrize("fmt_in", PyFormat)
 def test_dump(conn, val, wrapper, fmt_in):
-    wrapper = getattr(psycopg.types.json, wrapper)
+    wrapper = getattr(gaussdb.types.json, wrapper)
     obj = json.loads(val)
     cur = conn.cursor()
     cur.execute(
@@ -58,7 +58,7 @@ def test_dump_dict(conn, fmt_in, pgtype, dumper_name):
     obj = {"foo": "bar"}
     cur = conn.cursor()
-    dumper = getattr(psycopg.types.json, dumper_name)
+    dumper = getattr(gaussdb.types.json, dumper_name)
     # Skip json on CRDB as the oid doesn't exist.
     try:
@@ -80,7 +80,7 @@ def test_dump_dict(conn, fmt_in, pgtype, dumper_name):
 @pytest.mark.parametrize("wrapper", ["Json", "Jsonb"])
 @pytest.mark.parametrize("fmt_in", PyFormat)
 def test_array_dump(conn, val, wrapper, fmt_in):
-    wrapper = getattr(psycopg.types.json, wrapper)
+    wrapper = getattr(gaussdb.types.json, wrapper)
     obj = json.loads(val)
     cur = conn.cursor()
     cur.execute(
@@ -129,7 +129,7 @@ def test_load_copy(conn, val, jtype, fmt_out):
 @pytest.mark.parametrize("fmt_in", PyFormat)
 @pytest.mark.parametrize("wrapper", ["Json", "Jsonb"])
 def test_dump_customise(conn, wrapper, fmt_in):
-    wrapper = getattr(psycopg.types.json, wrapper)
+    wrapper = getattr(gaussdb.types.json, wrapper)
     obj = {"foo": "bar"}
     cur = conn.cursor()
@@ -144,7 +144,7 @@
 @pytest.mark.parametrize("fmt_in", PyFormat)
 @pytest.mark.parametrize("wrapper", ["Json", "Jsonb"])
 def test_dump_customise_bytes(conn, wrapper, fmt_in):
-    wrapper = getattr(psycopg.types.json, wrapper)
+    wrapper = getattr(gaussdb.types.json, wrapper)
     obj = {"foo": "bar"}
     cur = conn.cursor()
@@ -159,7 +159,7 @@
 @pytest.mark.parametrize("fmt_in", PyFormat)
 @pytest.mark.parametrize("wrapper", ["Json", "Jsonb"])
 def test_dump_customise_context(conn, wrapper, fmt_in):
-    wrapper = getattr(psycopg.types.json, wrapper)
+    wrapper = getattr(gaussdb.types.json, wrapper)
     obj = {"foo": "bar"}
     cur1 = conn.cursor()
     cur2 = conn.cursor()
@@ -174,7 +174,7 @@
 @pytest.mark.parametrize("fmt_in", PyFormat)
 @pytest.mark.parametrize("wrapper", ["Json", "Jsonb"])
 def test_dump_customise_wrapper(conn, wrapper, fmt_in):
-    wrapper = getattr(psycopg.types.json, wrapper)
+    wrapper = getattr(gaussdb.types.json, wrapper)
     obj = {"foo": "bar"}
     cur = conn.cursor()
     cur.execute(f"select %{fmt_in.value}->>'baz' = 'qux'", (wrapper(obj, my_dumps),))
diff --git a/tests/types/test_net.py b/tests/types/test_net.py
index a28ffb7e0..1ccfe2070 100644
--- a/tests/types/test_net.py
+++ b/tests/types/test_net.py
@@ -2,8 +2,8 @@
 import pytest
-from psycopg import pq, sql
-from psycopg.adapt import PyFormat
+from gaussdb import pq, sql
+from gaussdb.adapt import PyFormat
 crdb_skip_inet = pytest.mark.crdb_skip("inet")
 crdb_skip_cidr = pytest.mark.crdb_skip("cidr")
diff --git a/tests/types/test_none.py b/tests/types/test_none.py
index 48ba7b50d..c9258a2e3 100644
--- a/tests/types/test_none.py
+++ b/tests/types/test_none.py
@@ -1,5 +1,5 @@
-from psycopg import sql
-from psycopg.adapt import PyFormat, Transformer
+from gaussdb import sql
+from gaussdb.adapt import PyFormat, Transformer
 def test_quote_none(conn):
diff --git a/tests/types/test_numeric.py b/tests/types/test_numeric.py
index 517785de6..ba29ae7c7 100644
--- a/tests/types/test_numeric.py
+++ b/tests/types/test_numeric.py
@@ -6,11 +6,11 @@
 import pytest
-import psycopg
-from psycopg import pq, sql
-from psycopg.abc import Buffer
-from psycopg.adapt import PyFormat, Transformer
-from psycopg.types.numeric import FloatLoader, Int8, Int8BinaryDumper, Int8Dumper
+import gaussdb
+from gaussdb import pq, sql
+from gaussdb.abc import Buffer
+from gaussdb.adapt import PyFormat, Transformer
+from gaussdb.types.numeric import FloatLoader, Int8, Int8BinaryDumper, Int8Dumper
 from ..fix_crdb import is_crdb
@@ -576,7 +576,7 @@ def test_minus_minus_quote(conn, pgtype):
 @pytest.mark.parametrize("wrapper", "Int2 Int4 Int8 Oid Float4 Float8".split())
 @pytest.mark.parametrize("fmt_in", PyFormat)
 def test_dump_wrapper(conn, wrapper, fmt_in):
-    wrapper = getattr(psycopg.types.numeric, wrapper)
+    wrapper = getattr(gaussdb.types.numeric, wrapper)
     obj = wrapper(1)
     cur = conn.execute(
         f"select %(obj){fmt_in.value} = 1, %(obj){fmt_in.value}", {"obj": obj}
@@ -587,7 +587,7 @@
 @pytest.mark.parametrize("wrapper", "Int2 Int4 Int8 Oid Float4 Float8".split())
 def test_dump_wrapper_oid(wrapper):
-    wrapper = getattr(psycopg.types.numeric, wrapper)
+    wrapper = getattr(gaussdb.types.numeric, wrapper)
     base = wrapper.__mro__[1]
     assert base in (int, float)
     n = base(3.14)
@@ -599,10 +599,10 @@
 @pytest.mark.parametrize("wrapper", "Int2 Int4 Int8 Oid Float4 Float8".split())
 @pytest.mark.parametrize("fmt_in", PyFormat)
 def test_repr_wrapper(conn, wrapper, fmt_in):
-    wrapper = getattr(psycopg.types.numeric, wrapper)
+    wrapper = getattr(gaussdb.types.numeric, wrapper)
     cur = conn.execute(f"select pg_typeof(%{fmt_in.value})::oid", [wrapper(0)])
     oid = cur.fetchone()[0]
-    assert oid == psycopg.postgres.types[wrapper.__name__.lower()].oid
+    assert oid == gaussdb.postgres.types[wrapper.__name__.lower()].oid
 @pytest.mark.parametrize("fmt_out", pq.Format)
diff --git a/tests/types/test_numpy.py b/tests/types/test_numpy.py
index 23ce25f35..0de9f0fad 100644
--- a/tests/types/test_numpy.py
+++ b/tests/types/test_numpy.py
@@ -4,8 +4,8 @@
 import pytest
 from packaging.version import parse as ver  # noqa: F401  # used in skipif
-from psycopg.pq import Format
-from psycopg.adapt import PyFormat
+from gaussdb.pq import Format
+from gaussdb.adapt import PyFormat
 pytest.importorskip("numpy")
diff --git a/tests/types/test_range.py b/tests/types/test_range.py
index b724c016f..9dd4ddb19 100644
--- a/tests/types/test_range.py
+++ b/tests/types/test_range.py
@@ -4,10 +4,10 @@
 import pytest
-from psycopg import pq, sql
-from psycopg.adapt import PyFormat
-from psycopg.types import range as range_module
-from psycopg.types.range import Range, RangeInfo, register_range
+from gaussdb import pq, sql
+from gaussdb.adapt import PyFormat
+from gaussdb.types import range as range_module
+from gaussdb.types.range import Range, RangeInfo, register_range
 from ..utils import eur
 from ..fix_crdb import crdb_skip_message, is_crdb
diff --git a/tests/types/test_shapely.py b/tests/types/test_shapely.py
index 39a41ef63..bf3e1994d 100644
--- a/tests/types/test_shapely.py
+++ b/tests/types/test_shapely.py
@@ -1,15 +1,15 @@
 import pytest
-import psycopg
-from psycopg.pq import Format
-from psycopg.adapt import PyFormat
-from psycopg.types import TypeInfo
+import gaussdb
+from gaussdb.pq import Format
+from gaussdb.adapt import PyFormat
+from gaussdb.types import TypeInfo
 pytest.importorskip("shapely")
 from shapely.geometry import MultiPolygon, Point, Polygon
-from psycopg.types.shapely import register_shapely, shapely_version
+from gaussdb.types.shapely import register_shapely, shapely_version
 if shapely_version >= (2, 0):
     from shapely import get_srid, set_srid
@@ -79,7 +79,7 @@ def shapely_conn(conn, svcconn):
     try:
         with svcconn.transaction():
             svcconn.execute("create extension if not exists postgis")
-    except psycopg.Error as e:
+    except gaussdb.Error as e:
         pytest.skip(f"can't create extension postgis: {e}")
     info = TypeInfo.fetch(conn, "geometry")
@@ -90,12 +90,12 @@ def shapely_conn(conn, svcconn):
 def test_no_adapter(conn):
     point = Point(1.2, 3.4)
-    with pytest.raises(psycopg.ProgrammingError, match="cannot adapt type 'Point'"):
+    with pytest.raises(gaussdb.ProgrammingError, match="cannot adapt type 'Point'"):
         conn.execute("SELECT pg_typeof(%s)", [point]).fetchone()[0]
 def test_no_info_error(conn):
-    from psycopg.types.shapely import register_shapely
+    from gaussdb.types.shapely import register_shapely
     with pytest.raises(TypeError, match="postgis.*extension"):
         register_shapely(None, conn)  # type: ignore[arg-type]
diff --git a/tests/types/test_string.py b/tests/types/test_string.py
index 846105339..ccbf0ba3a 100644
--- a/tests/types/test_string.py
+++ b/tests/types/test_string.py
@@ -1,10 +1,10 @@
 import pytest
-import psycopg
-from psycopg import Binary
-from psycopg import errors as e
-from psycopg import pq, sql
-from psycopg.adapt import PyFormat
+import gaussdb
+from gaussdb import Binary
+from gaussdb import errors as e
+from gaussdb import pq, sql
+from gaussdb.adapt import PyFormat
 from ..utils import eur
 from ..fix_crdb import crdb_encoding, crdb_scs_off
@@ -52,20 +52,20 @@ def test_dump_zero(conn, fmt_in):
         pytest.skip("GaussDB allows null characters in strings for binary format.")
     cur = conn.cursor()
     s = "foo\x00bar"
-    with pytest.raises(psycopg.DataError):
+    with pytest.raises(gaussdb.DataError):
         cur.execute(f"select %{fmt_in.value}::text", (s,))
 def test_quote_zero(conn):
     cur = conn.cursor()
     s = "foo\x00bar"
-    with pytest.raises(psycopg.DataError):
+    with pytest.raises(gaussdb.DataError):
         cur.execute(sql.SQL("select {}").format(sql.Literal(s)))
 # the only way to make this pass is to reduce %% -> % every time
 # not only when there are query arguments
 # see https://github.com/psycopg/psycopg2/issues/825
 @pytest.mark.xfail
 def test_quote_percent(conn):
     cur = conn.cursor()
@@ -151,7 +151,7 @@ def test_dump_text_oid(conn, fmt_in):
     with pytest.raises(e.IndeterminateDatatype):
         conn.execute(f"select concat(%{fmt_in.value}, %{fmt_in.value})", ["foo", "bar"])
-    conn.adapters.register_dumper(str, psycopg.types.string.StrDumper)
+    conn.adapters.register_dumper(str, gaussdb.types.string.StrDumper)
     cur = conn.execute(
         f"select concat(%{fmt_in.value}, %{fmt_in.value})", ["foo", "bar"]
     )
@@ -188,7 +188,7 @@ def test_load_badenc(conn, typename, fmt_out):
     cur = conn.cursor(binary=fmt_out)
     conn.execute("set client_encoding to latin1")
-    with pytest.raises(psycopg.DataError):
+    with pytest.raises(gaussdb.DataError):
         cur.execute(f"select chr(%s)::{typename}", [ord(eur)])
     stmt = sql.SQL("copy (select chr({})) to stdout (format {})").format(
@@ -196,7 +196,7 @@
     )
     with cur.copy(stmt) as copy:
         copy.set_types([typename])
-        with pytest.raises(psycopg.DataError):
+        with pytest.raises(gaussdb.DataError):
             copy.read_row()
diff --git a/tests/types/test_uuid.py b/tests/types/test_uuid.py
index 31a932498..5b96695e4 100644
--- a/tests/types/test_uuid.py
+++ b/tests/types/test_uuid.py
@@ -4,8 +4,8 @@
 import pytest
-from psycopg import pq, sql
-from psycopg.adapt import PyFormat
+from gaussdb import pq, sql
+from gaussdb.adapt import PyFormat
 @pytest.mark.parametrize("fmt_in", PyFormat)
@@ -69,11 +69,11 @@ def test_uuid_load(conn, fmt_out, val):
 def test_lazy_load(dsn):
     script = f"""\
 import sys
-import psycopg
+import gaussdb
 assert 'uuid' not in sys.modules
-conn = psycopg.connect({dsn!r})
+conn = gaussdb.connect({dsn!r})
 with conn.cursor() as cur:
     cur.execute("select repeat('1', 32)::uuid")
     cur.fetchone()
diff --git a/tests/typing_example.py b/tests/typing_example.py
index 01cd46026..17632b95b 100644
--- a/tests/typing_example.py
+++ b/tests/typing_example.py
@@ -6,8 +6,8 @@
 from dataclasses import dataclass
 from collections.abc import Sequence
-from psycopg import AsyncConnection, AsyncCursor, AsyncServerCursor, Connection, Cursor
-from psycopg import ServerCursor, connect, rows
+from gaussdb import AsyncConnection, AsyncCursor, AsyncServerCursor, Connection, Cursor
+from gaussdb import ServerCursor, connect, rows
 def int_row_factory(
diff --git a/tests/utils.py b/tests/utils.py
index d3cd56805..2c3da8f71 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -182,14 +182,14 @@ def set_autocommit(conn, value):
     """
     Set autocommit on a connection.
-    Give an uniform interface to both sync and async connection for psycopg
-    < 3.2, in order to run psycopg_pool 3.2 tests using psycopg 3.1.
+    Give a uniform interface to both sync and async connections for gaussdb
+    < 3.2, in order to run gaussdb_pool 3.2 tests using gaussdb 3.1.
     """
-    import psycopg
+    import gaussdb
-    if isinstance(conn, psycopg.Connection):
+    if isinstance(conn, gaussdb.Connection):
         conn.autocommit = value
-    elif isinstance(conn, psycopg.AsyncConnection):
+    elif isinstance(conn, gaussdb.AsyncConnection):
         return conn.set_autocommit(value)
     else:
         raise TypeError(f"not a connection: {conn}")
diff --git a/tools/async_to_sync.py b/tools/async_to_sync.py
index 85746175f..68b361be5 100755
--- a/tools/async_to_sync.py
+++ b/tools/async_to_sync.py
@@ -30,13 +30,13 @@
 PYVER = "3.11"
 ALL_INPUTS = """
-    psycopg/psycopg/_conninfo_attempts_async.py
-    psycopg/psycopg/_copy_async.py
-    psycopg/psycopg/connection_async.py
-    psycopg/psycopg/cursor_async.py
-    psycopg_pool/psycopg_pool/null_pool_async.py
-    psycopg_pool/psycopg_pool/pool_async.py
-    psycopg_pool/psycopg_pool/sched_async.py
+    gaussdb/gaussdb/_conninfo_attempts_async.py
+    gaussdb/gaussdb/_copy_async.py
+    gaussdb/gaussdb/connection_async.py
+    gaussdb/gaussdb/cursor_async.py
+    gaussdb_pool/gaussdb_pool/null_pool_async.py
+    gaussdb_pool/gaussdb_pool/pool_async.py
+    gaussdb_pool/gaussdb_pool/sched_async.py
     tests/crdb/test_connection_async.py
     tests/crdb/test_copy_async.py
     tests/crdb/test_cursor_async.py
@@ -177,8 +177,8 @@ def run_in_container(engine: Literal["docker", "podman"]) -> int:
 WORKDIR /src
-ADD psycopg psycopg
-RUN pip install ./psycopg[dev]
+ADD gaussdb gaussdb
+RUN pip install ./gaussdb[dev]
 ENTRYPOINT ["tools/async_to_sync.py"]
 """
@@ -324,8 +324,8 @@ class RenameAsyncToSync(ast.NodeTransformer):  # type: ignore
         "ensure_table_async": "ensure_table",
         "find_insert_problem_async": "find_insert_problem",
         "pool_async": "pool",
-        "psycopg_pool.pool_async": "psycopg_pool.pool",
-        "psycopg_pool.sched_async": "psycopg_pool.sched",
+        "gaussdb_pool.pool_async": "gaussdb_pool.pool",
+        "gaussdb_pool.sched_async": "gaussdb_pool.sched",
         "sched_async": "sched",
         "test_pool_common_async": "test_pool_common",
         "wait_async": "wait",
diff --git a/tools/bump_version.py b/tools/bump_version.py
index d08b445a2..003312870 100755
--- a/tools/bump_version.py
+++ b/tools/bump_version.py
@@ -36,19 +36,19 @@ def __post_init__(self) -> None:
 packages: dict[str, Package] = {}
 Package(
-    name="psycopg",
+    name="gaussdb",
     toml_files=[
-        PROJECT_DIR / "psycopg/pyproject.toml",
-        PROJECT_DIR / "psycopg_c/pyproject.toml",
+        PROJECT_DIR / "gaussdb/pyproject.toml",
+        PROJECT_DIR / "gaussdb_c/pyproject.toml",
     ],
     history_file=PROJECT_DIR / "docs/news.rst",
     tag_format="{version}",
-    extras=["psycopg-c", "psycopg-binary"],
+    extras=["gaussdb-c", "gaussdb-binary"],
 )
 Package(
-    name="psycopg_pool",
-    toml_files=[PROJECT_DIR / "psycopg_pool/pyproject.toml"],
+    name="gaussdb_pool",
+    toml_files=[PROJECT_DIR / "gaussdb_pool/pyproject.toml"],
     history_file=PROJECT_DIR / "docs/news_pool.rst",
     tag_format="pool-{version}",
     extras=[],
@@ -312,7 +312,7 @@ def parse_cmdline() -> Namespace:
         "-p",
         "--package",
         choices=list(packages.keys()),
-        default="psycopg",
+        default="gaussdb",
         help="the package to bump version [default: %(default)s]",
     )
diff --git a/tools/ci/build_macos_arm64.sh b/tools/ci/build_macos_arm64.sh
index 8d6bcf593..f9313afc2 100755
--- a/tools/ci/build_macos_arm64.sh
+++ b/tools/ci/build_macos_arm64.sh
@@ -101,9 +101,9 @@
 export CIBW_BUILD='cp{38,39,310,311,312,313}-*'
-export CIBW_TEST_REQUIRES="./psycopg[test] ./psycopg_pool"
+export CIBW_TEST_REQUIRES="./gaussdb[test] ./gaussdb_pool"
 export CIBW_TEST_COMMAND="pytest {project}/tests -m 'not slow and not flakey' --color yes"
-export PSYCOPG_IMPL=binary
-export PSYCOPG_TEST_DSN="dbname=postgres"
-export PSYCOPG_TEST_WANT_LIBPQ_BUILD=">= ${pg_version}"
-export PSYCOPG_TEST_WANT_LIBPQ_IMPORT=">= ${pg_version}"
+export GAUSSDB_IMPL=binary
+export GAUSSDB_TEST_DSN="dbname=postgres"
+export GAUSSDB_TEST_WANT_LIBPQ_BUILD=">= ${pg_version}"
+export GAUSSDB_TEST_WANT_LIBPQ_IMPORT=">= ${pg_version}"
-cibuildwheel psycopg_binary
+cibuildwheel gaussdb_binary
diff --git a/tools/ci/copy_to_binary.py b/tools/ci/copy_to_binary.py
index 154d4a37b..fb96efd86 100755
--- a/tools/ci/copy_to_binary.py
+++ b/tools/ci/copy_to_binary.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
-# Create the psycopg-binary package by renaming and patching psycopg-c
+# Create the gaussdb-binary package by renaming and patching gaussdb-c
 from __future__ import annotations
@@ -11,7 +11,7 @@
 curdir = Path(__file__).parent
 pdir = curdir / "../.."
-target = pdir / "psycopg_binary"
+target = pdir / "gaussdb_binary"
 if target.exists():
     raise Exception(f"path {target} already exists")
@@ -26,14 +26,14 @@ def sed_i(pattern: str, repl: str, filename: str | Path) -> None:
         f.write(newdata)
-shutil.copytree(pdir / "psycopg_c", target)
-shutil.move(str(target / "psycopg_c"), str(target / "psycopg_binary"))
+shutil.copytree(pdir / "gaussdb_c", target)
+shutil.move(str(target / "gaussdb_c"), str(target / "gaussdb_binary"))
 shutil.move(str(target / "README-binary.rst"), str(target / "README.rst"))
-sed_i("psycopg-c", "psycopg-binary", target / "pyproject.toml")
-sed_i(r'"psycopg_c([\./][^"]+)?"', r'"psycopg_binary\1"', target / "pyproject.toml")
-sed_i(r"__impl__\s*=.*", '__impl__ = "binary"', target / "psycopg_binary/pq.pyx")
+sed_i("gaussdb-c", "gaussdb-binary", target / "pyproject.toml")
+sed_i(r'"gaussdb_c([\./][^"]+)?"', r'"gaussdb_binary\1"', target / "pyproject.toml")
+sed_i(r"__impl__\s*=.*", '__impl__ = "binary"', target / "gaussdb_binary/pq.pyx")
 for dirpath, dirnames, filenames in os.walk(target):
     for filename in filenames:
         if os.path.splitext(filename)[1] not in (".pyx", ".pxd", ".py"):
             continue
-        sed_i(r"\bpsycopg_c\b", "psycopg_binary", Path(dirpath) / filename)
+        sed_i(r"\bgaussdb_c\b", "gaussdb_binary", Path(dirpath) / filename)
diff --git a/tools/isort-psycopg/README.rst b/tools/isort-gaussdb/README.rst
similarity index 79%
rename from tools/isort-psycopg/README.rst
rename to tools/isort-gaussdb/README.rst
index 317d4d3d1..9b7a4a732 100644
--- a/tools/isort-psycopg/README.rst
+++ b/tools/isort-gaussdb/README.rst
@@ -1,7 +1,7 @@
-Psycopg style isort
+GaussDB style isort
 ===================
-This is an isort_ plugin implementing the style used in the `Psycopg 3`_
+This is an isort_ plugin implementing the style used in the `gaussdb`_
 project to sort:
 - imports in length order
@@ -20,10 +20,10 @@ Example configuration::
     profile = "black"
     length_sort = true
    multi_line_output = 9
-    sort_order = "psycopg"
+    sort_order = "gaussdb"
 Note: because this is the first day I use isort at all, there is a chance
 that this plug-in is totally useless and the same can be done using isort
 features.
 .. _isort: https://pycqa.github.io/isort/
-.. _psycopg 3: https://www.psycopg.org/
+.. _gaussdb: https://www.gaussdb.org/
diff --git a/tools/isort-psycopg/isort_psycopg.py b/tools/isort-gaussdb/isort_gaussdb.py
similarity index 100%
rename from tools/isort-psycopg/isort_psycopg.py
rename to tools/isort-gaussdb/isort_gaussdb.py
diff --git a/tools/isort-psycopg/pyproject.toml b/tools/isort-gaussdb/pyproject.toml
similarity index 68%
rename from tools/isort-psycopg/pyproject.toml
rename to tools/isort-gaussdb/pyproject.toml
index e3b1aca79..fcdf1ddf1 100644
--- a/tools/isort-psycopg/pyproject.toml
+++ b/tools/isort-gaussdb/pyproject.toml
@@ -3,15 +3,15 @@
 requires = ["setuptools", "wheel"]
 build-backend = "setuptools.build_meta"
 [project]
-name = "isort-psycopg"
+name = "isort-gaussdb"
 description = "isort plug-in to sort imports by module length first"
 # Note: to release a new version:
 # python -m build -o dist --wheel .
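A note on the corrected sed_i() call in copy_to_binary.py above: the search pattern has to follow the rename. Once the tree is copied from gaussdb_c, the sources contain the token gaussdb_c, not psycopg_c, so a stale \bpsycopg_c\b pattern would match nothing and the rewrite would silently become a no-op. A self-contained check (illustrative only)::

    import re

    line = "from gaussdb_c import pq"
    # The stale pattern finds nothing in the renamed sources, so the
    # substitution returns its input unchanged:
    assert re.sub(r"\bpsycopg_c\b", "gaussdb_binary", line) == line
    # The corrected pattern performs the intended rewrite:
    assert re.sub(r"\bgaussdb_c\b", "gaussdb_binary", line) == "from gaussdb_binary import pq"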
-# twine upload dist/isort_psycopg-*-py3-none-any.whl
+# twine upload dist/isort_gaussdb-*-py3-none-any.whl
-version = "0.0.2"
+version = "0.0.1"
 [project.urls]
-Code = "https://github.com/psycopg/psycopg/tree/master/tools/isort-psycopg"
+Code = "https://github.com/gaussdb/gaussdb/tree/master/tools/isort-gaussdb"
 [project.readme]
 file = "README.rst"
@@ -22,4 +22,4 @@
 name = "Daniele Varrazzo"
 email = "daniele.varrazzo@gmail.com"
 [project.entry-points."isort.sort_function"]
-psycopg = "isort_psycopg:psycosort"
+gaussdb = "isort_gaussdb:psycosort"
diff --git a/tools/update_error_prefixes.py b/tools/update_error_prefixes.py
index 1c975247e..de65883cf 100755
--- a/tools/update_error_prefixes.py
+++ b/tools/update_error_prefixes.py
@@ -73,7 +73,7 @@ def parse_cmdline() -> Namespace:
         break
     default_pgroot = default_pgroot.resolve()
-    default_dest = (HERE / "../psycopg/psycopg/pq/misc.py").resolve()
+    default_dest = (HERE / "../gaussdb/gaussdb/pq/misc.py").resolve()
     parser = ArgumentParser(description=__doc__)
     parser.add_argument(
diff --git a/tools/update_errors.py b/tools/update_errors.py
index cabecbacb..37b2fa9b1 100755
--- a/tools/update_errors.py
+++ b/tools/update_errors.py
@@ -6,7 +6,7 @@
 The script can be run at a new PostgreSQL release to refresh the module.
 """
-# Copyright (C) 2020 The Psycopg Team
+# Copyright (C) 2020 The GaussDB Team
 import os
 import re
@@ -15,7 +15,7 @@
 from collections import defaultdict, namedtuple
 from urllib.request import urlopen
-from psycopg.errors import get_base_exception
+from gaussdb.errors import get_base_exception
 logger = logging.getLogger()
 logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
@@ -24,7 +24,7 @@
 def main():
     classes, errors = fetch_errors("9.6 10 11 12 13 14 15 16 17".split())
-    fn = os.path.dirname(__file__) + "/../psycopg/psycopg/errors.py"
+    fn = os.path.dirname(__file__) + "/../gaussdb/gaussdb/errors.py"
     update_file(fn, generate_module_data(classes, errors))
     fn = os.path.dirname(__file__) + "/../docs/api/errors.rst"
diff --git a/tools/update_oids.py b/tools/update_oids.py
index 45694417c..c6628686e 100755
--- a/tools/update_oids.py
+++ b/tools/update_oids.py
@@ -2,7 +2,7 @@
 """
 Update the maps of builtin types and names.
-This script updates some of the files in psycopg source code with data read
+This script updates some of the files in gaussdb source code with data read
 from a database catalog.
 Hint: use docker to upgrade types from a new version in isolation. Run:
@@ -21,20 +21,20 @@
 import subprocess as sp
 from pathlib import Path
-import psycopg
-from psycopg.pq import version_pretty
-from psycopg.crdb import CrdbConnection
-from psycopg.rows import TupleRow
-from psycopg._compat import TypeAlias
+import gaussdb
+from gaussdb.pq import version_pretty
+from gaussdb.crdb import CrdbConnection
+from gaussdb.rows import TupleRow
+from gaussdb._compat import TypeAlias
-Connection: TypeAlias = psycopg.Connection[TupleRow]
+Connection: TypeAlias = gaussdb.Connection[TupleRow]
 ROOT = Path(__file__).parent.parent
 def main() -> None:
     opt = parse_cmdline()
-    conn = psycopg.connect(opt.dsn, autocommit=True)
+    conn = gaussdb.connect(opt.dsn, autocommit=True)
     if CrdbConnection.is_crdb(conn):
         conn = CrdbConnection.connect(opt.dsn, autocommit=True)
@@ -46,7 +46,7 @@ def main() -> None:
 def update_python_types(conn: Connection) -> None:
-    fn = ROOT / "psycopg/psycopg/postgres.py"
+    fn = ROOT / "gaussdb/gaussdb/postgres.py"
     lines = []
     lines.extend(get_version_comment(conn))
@@ -59,7 +59,7 @@
 def update_python_oids(conn: Connection) -> None:
-    fn = ROOT / "psycopg/psycopg/_oids.py"
+    fn = ROOT / "gaussdb/gaussdb/_oids.py"
     lines = []
     lines.extend(get_version_comment(conn))
@@ -70,7 +70,7 @@
 def update_cython_oids(conn: Connection) -> None:
-    fn = ROOT / "psycopg_c/psycopg_c/_psycopg/oids.pxd"
+    fn = ROOT / "gaussdb_c/gaussdb_c/_gaussdb/oids.pxd"
     lines = []
     lines.extend(get_version_comment(conn))
@@ -80,7 +80,7 @@
 def update_crdb_python_oids(conn: Connection) -> None:
-    fn = ROOT / "psycopg/psycopg/crdb/_types.py"
+    fn = ROOT / "gaussdb/gaussdb/crdb/_types.py"
     lines = []
     lines.extend(get_version_comment(conn))