Commit 7c25365

Merge branch 'master' into refactor/pep-518
2 parents: 03ae597 + d7f380b

14 files changed: +99 -144 lines changed

.readthedocs.yml

Lines changed: 0 additions & 1 deletion

@@ -16,7 +16,6 @@ python:
         - doc
         - plotting
         - bloch_sphere_visualization
-        - fancy_progressbar

 sphinx:
   builder: html

doc/source/api.rst

Lines changed: 8 additions & 0 deletions

@@ -0,0 +1,8 @@
+API
+===
+
+.. autosummary::
+   :toctree: generated
+   :recursive:
+
+   filter_functions

doc/source/conf.py

Lines changed: 3 additions & 2 deletions

@@ -38,27 +38,28 @@
     'nbsphinx',
     'sphinx.ext.mathjax',
     'sphinx.ext.todo',
+    'sphinx.ext.autodoc',
     'sphinx.ext.autosummary',
     'numpydoc',
     'sphinx.ext.extlinks',
     'sphinx.ext.viewcode',
     'sphinx.ext.ifconfig',
     'sphinx.ext.napoleon',
     'sphinx.ext.intersphinx',
-    #'sphinxcontrib.apidoc',
+    'sphinxcontrib.apidoc',
     #'IPython.sphinxext.ipython_console_highlighting',
     #'IPython.sphinxext.ipython_directive',
     #'matplotlib.sphinxext.only_directives',
     #'matplotlib.sphinxext.plot_directive',
     #'matplotlib.sphinxext.mathmpl',
-    #'sphinx.ext.autodoc',
     #'sphinx.ext.doctest',
 ]

 # Apidoc config
 apidoc_module_dir = '../../filter_functions'
 apidoc_excluded_paths = ['../tests']
 apidoc_separate_modules = True
+apidoc_module_first = True

 # Numpydoc settings
 numpydoc_show_inherited_class_members = False

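Taken together with the new doc/source/api.rst above, the build now generates API pages in two complementary ways: sphinx.ext.autosummary expands the recursive autosummary directive into per-module stub pages, while sphinxcontrib.apidoc runs sphinx-apidoc over the package at build time. A hypothetical minimal conf.py excerpt sketching how these settings fit together; values marked "assumed" (apidoc_output_dir, autosummary_generate) do not appear in this commit:

    # Hypothetical conf.py excerpt -- a sketch only, not the project's full config.
    extensions = [
        'sphinx.ext.autodoc',        # pulls documentation from docstrings
        'sphinx.ext.autosummary',    # expands the autosummary directive in api.rst
        'sphinxcontrib.apidoc',      # runs sphinx-apidoc automatically at build time
        'numpydoc',
    ]

    autosummary_generate = True                   # assumed; on by default in recent Sphinx
    apidoc_module_dir = '../../filter_functions'  # package that sphinx-apidoc scans
    apidoc_output_dir = 'generated'               # assumed output directory for the stubs
    apidoc_excluded_paths = ['../tests']
    apidoc_separate_modules = True                # one .rst page per module
    apidoc_module_first = True                    # module docstring before submodule list
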
doc/source/filter_functions.rst

Lines changed: 0 additions & 85 deletions
This file was deleted.

doc/source/index.rst

Lines changed: 1 addition & 1 deletion

@@ -23,7 +23,7 @@ Documentation
    :numbered:

    examples/examples
-   filter_functions API Documentation <filter_functions>
+   filter_functions API Documentation <api>

 Indices and tables
 ==================

environment.yml

Lines changed: 1 addition & 0 deletions

@@ -6,6 +6,7 @@ channels:
 dependencies:
   - python >= 3.9
   - qutip
+  - pandoc
   - pip

 prefix: /home/docs/.conda/envs/filter_functions

filter_functions/basis.py

Lines changed: 25 additions & 19 deletions

@@ -47,7 +47,6 @@
 import numpy as np
 import opt_einsum as oe
 from numpy import linalg as nla
-from numpy.core import ndarray
 from scipy import linalg as sla
 from sparse import COO

@@ -56,7 +55,7 @@
 __all__ = ['Basis', 'expand', 'ggm_expand', 'normalize']


-class Basis(ndarray):
+class Basis(np.ndarray):
     r"""
     Class for operator bases. There are several ways to instantiate a
     Basis object:

@@ -217,22 +216,26 @@ def __eq__(self, other: object) -> bool:
             # Not ndarray
             return np.equal(self, other)

-        return np.allclose(self.view(ndarray), other.view(ndarray),
+        return np.allclose(self.view(np.ndarray), other.view(np.ndarray),
                            atol=self._atol, rtol=self._rtol)

-    def __contains__(self, item: ndarray) -> bool:
+    def __contains__(self, item: np.ndarray) -> bool:
         """Implement 'in' operator."""
-        return any(np.isclose(item.view(ndarray), self.view(ndarray),
+        return any(np.isclose(item.view(np.ndarray), self.view(np.ndarray),
                               rtol=self._rtol, atol=self._atol).all(axis=(1, 2)))

-    def __array_wrap__(self, out_arr, context=None):
+    def __array_wrap__(self, arr, context=None, return_scalar=False):
         """
         Fixes problem that ufuncs return 0-d arrays instead of scalars.

         https://github.com/numpy/numpy/issues/5819#issue-72454838
         """
-        if out_arr.ndim:
-            return ndarray.__array_wrap__(self, out_arr, context)
+        try:
+            return super().__array_wrap__(arr, context, return_scalar=True)
+        except TypeError:
+            if arr.ndim:
+                # Numpy < 2
+                return np.ndarray.__array_wrap__(self, arr, context)

     def _print_checks(self) -> None:
         """Print checks for debug purposes."""

@@ -265,7 +268,7 @@ def isorthonorm(self) -> bool:
         actual = U.conj() @ U.T
         target = np.identity(dim)
         atol = self._eps*(self.d**2)**3
-        self._isorthonorm = np.allclose(actual.view(ndarray), target,
+        self._isorthonorm = np.allclose(actual.view(np.ndarray), target,
                                         atol=atol, rtol=self._rtol)

         return self._isorthonorm

@@ -278,13 +281,16 @@ def istraceless(self) -> bool:
         if self._istraceless is None:
             trace = np.einsum('...jj', self)
             trace = util.remove_float_errors(trace, self.d**2)
-            nonzero = trace.nonzero()
+            nonzero = np.atleast_1d(trace).nonzero()
             if nonzero[0].size == 0:
                 self._istraceless = True
             elif nonzero[0].size == 1:
                 # Single element has nonzero trace, check if (proportional to)
                 # identity
-                elem = self[nonzero][0].view(ndarray) if self.ndim == 3 else self.view(ndarray)
+                if self.ndim == 3:
+                    elem = self[nonzero][0].view(np.ndarray)
+                else:
+                    elem = self.view(np.ndarray)
                 offdiag_nonzero = elem[~np.eye(self.d, dtype=bool)].nonzero()
                 diag_equal = np.diag(elem) == elem[0, 0]
                 if diag_equal.all() and not offdiag_nonzero[0].any():

@@ -597,7 +603,7 @@ def _full_from_partial(elems: Sequence, traceless: bool, labels: Sequence[str])
     # sort Identity label to the front, default to first if not found
     # (should not happen since traceless checks that it is present)
     id_idx = next((i for i, elem in enumerate(elems)
-                   if np.allclose(Id.view(ndarray), elem.view(ndarray),
+                   if np.allclose(Id.view(np.ndarray), elem.view(np.ndarray),
                                   rtol=elems._rtol, atol=elems._atol)), 0)
     labels.insert(0, labels.pop(id_idx))

@@ -606,7 +612,7 @@ def _full_from_partial(elems: Sequence, traceless: bool, labels: Sequence[str])
     return basis, labels


-def _norm(b: Sequence) -> ndarray:
+def _norm(b: Sequence) -> np.ndarray:
     """Frobenius norm with two singleton dimensions inserted at the end."""
     b = np.asanyarray(b)
     norm = nla.norm(b, axis=(-1, -2))

@@ -633,8 +639,8 @@ def normalize(b: Basis) -> Basis:
     return (b/_norm(b)).squeeze().view(Basis)


-def expand(M: Union[ndarray, Basis], basis: Union[ndarray, Basis],
-           normalized: bool = True, hermitian: bool = False, tidyup: bool = False) -> ndarray:
+def expand(M: Union[np.ndarray, Basis], basis: Union[np.ndarray, Basis],
+           normalized: bool = True, hermitian: bool = False, tidyup: bool = False) -> np.ndarray:
     r"""
     Expand the array *M* in the basis given by *basis*.

@@ -684,8 +690,8 @@ def cast(arr):
     return util.remove_float_errors(coefficients) if tidyup else coefficients


-def ggm_expand(M: Union[ndarray, Basis], traceless: bool = False,
-               hermitian: bool = False) -> ndarray:
+def ggm_expand(M: Union[np.ndarray, Basis], traceless: bool = False,
+               hermitian: bool = False) -> np.ndarray:
     r"""
     Expand the matrix *M* in a Generalized Gell-Mann basis [Bert08]_.
     This function makes use of the explicit construction prescription of

@@ -767,7 +773,7 @@ def cast(arr):
     return coeffs.squeeze() if square else coeffs


-def equivalent_pauli_basis_elements(idx: Union[Sequence[int], int], N: int) -> ndarray:
+def equivalent_pauli_basis_elements(idx: Union[Sequence[int], int], N: int) -> np.ndarray:
     """
     Get the indices of the equivalent (up to identities tensored to it)
     basis elements of Pauli bases of qubits at position idx in the total

@@ -780,7 +786,7 @@ def equivalent_pauli_basis_elements(idx: Union[Sequence[int], int], N: int) -> n
     return elem_idx


-def remap_pauli_basis_elements(order: Sequence[int], N: int) -> ndarray:
+def remap_pauli_basis_elements(order: Sequence[int], N: int) -> np.ndarray:
     """
     For a N-qubit Pauli basis, transpose the order of the subsystems and
     return the indices that permute the old basis to the new.

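The __array_wrap__ change in the hunk above is the NumPy 2 compatibility piece of this merge: NumPy 2 passes an extra return_scalar argument to the hook, which NumPy 1.x rejects with a TypeError. Below is a self-contained sketch of the same pattern on a toy ndarray subclass; the class and variable names are illustrative only, not from the package:

    import numpy as np

    class Wrapped(np.ndarray):
        """Toy subclass using the same NumPy 1/2 shim as Basis.__array_wrap__."""

        def __array_wrap__(self, arr, context=None, return_scalar=False):
            try:
                # NumPy >= 2: let the base class turn 0-d results into scalars.
                return super().__array_wrap__(arr, context, return_scalar=True)
            except TypeError:
                # NumPy < 2: old two-argument signature; only wrap nd results.
                if arr.ndim:
                    return np.ndarray.__array_wrap__(self, arr, context)

    a = np.arange(4.0).view(Wrapped)
    print(type(a + 1))    # stays a Wrapped array on either NumPy version
    print(type(a.sum()))  # a plain scalar on NumPy >= 2; on NumPy < 2 the 0-d
                          # case falls through, mirroring the Basis code above
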
filter_functions/pulse_sequence.py

Lines changed: 11 additions & 0 deletions

@@ -237,6 +237,17 @@ class PulseSequence:
     Due to the heavy use of NumPy's :func:`~numpy.einsum` function,
     results have a floating point error of ~1e-13.
     """
+    __array_interface__ = {
+        'shape': (),
+        'typestr': '|O',
+        'version': 3
+    }
+    """Describes to NumPy how to convert this object into an array.
+
+    Since :class:`PulseSequence` is iterable (through
+    :meth:`__getitem__`), NumPy would otherwise try to create an
+    ndarray of single-segment :class:`PulseSequence` s.
+    """

     def __init__(self, *args, **kwargs) -> None:
         """Initialize a PulseSequence instance."""

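The new class attribute works around NumPy's sequence coercion. A standalone illustration of the underlying problem with a toy class (not from the package): anything exposing __len__ and __getitem__ gets unpacked element by element, which is exactly what the docstring above says would otherwise happen to a PulseSequence; declaring a 0-d object __array_interface__ instead tells NumPy to treat each instance as a single scalar-like object.

    import numpy as np

    class Segments:
        """Toy stand-in for a segmented pulse, iterable via __len__/__getitem__."""
        def __init__(self, n):
            self.n = n

        def __len__(self):
            return self.n

        def __getitem__(self, i):
            if not 0 <= i < self.n:
                raise IndexError(i)
            return f'segment {i}'

    # Without the interface trick, NumPy unpacks the object into its elements:
    print(np.array(Segments(3)))
    # -> ['segment 0' 'segment 1' 'segment 2']
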
filter_functions/util.py

Lines changed: 20 additions & 12 deletions

@@ -26,8 +26,7 @@
 :func:`abs2`
     Absolute value squared
 :func:`get_indices_from_identifiers`
-    The the indices of a subset of identifiers within a list of
-    identifiers.
+    The indices of a subset of identifiers within a list of identifiers.
 :func:`tensor`
     Fast, flexible tensor product of an arbitrary number of inputs using
     :func:`~numpy.einsum`

@@ -70,6 +69,7 @@
 import functools
 import inspect
 import operator
+import os
 import string
 from itertools import zip_longest
 from typing import Callable, Iterable, List, Optional, Sequence, Tuple, Union

@@ -79,17 +79,25 @@

 from .types import Operator, State

-try:
-    import ipynbname
-    _NOTEBOOK_NAME = ipynbname.name()
-except (ImportError, IndexError, FileNotFoundError):
-    _NOTEBOOK_NAME = ''

-if _NOTEBOOK_NAME:
-    from tqdm.notebook import tqdm as _tqdm
+def _in_notebook_kernel():
+    # https://github.com/jupyterlab/jupyterlab/issues/16282
+    return 'JPY_SESSION_NAME' in os.environ and os.environ['JPY_SESSION_NAME'].endswith('.ipynb')
+
+
+def _in_jupyter_kernel():
+    # https://discourse.jupyter.org/t/how-to-know-from-python-script-if-we-are-in-jupyterlab/23993
+    return 'JPY_PARENT_PID' in os.environ
+
+
+if not _in_notebook_kernel():
+    if _in_jupyter_kernel():
+        # (10/24) Autonotebook gets confused in jupyter consoles
+        from tqdm.std import tqdm
+    else:
+        from tqdm.autonotebook import tqdm
 else:
-    # Either not running notebook or not able to determine
-    from tqdm import tqdm as _tqdm
+    from tqdm.notebook import tqdm

 __all__ = ['paulis', 'abs2', 'all_array_equal', 'dot_HS', 'get_sample_frequencies',
            'hash_array_along_axis', 'mdot', 'oper_equiv', 'progressbar', 'remove_float_errors',

@@ -1067,7 +1075,7 @@ def progressbar(iterable: Iterable, *args, **kwargs):
     for i in progressbar(range(10)):
         do_something()
     """
-    return _tqdm(iterable, *args, **kwargs)
+    return tqdm(iterable, *args, **kwargs)


 def progressbar_range(*args, show_progressbar: bool = True, **kwargs):

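Since ipynbname is no longer used, the tqdm flavour is now chosen from the environment variables Jupyter sets for its kernels, and calling code stays the same. A brief usage sketch of the module's progress-bar helpers, assuming filter_functions is installed; desc is an ordinary tqdm keyword forwarded unchanged:

    from filter_functions import util

    # util.progressbar simply forwards to whichever tqdm was selected above.
    total = 0
    for i in util.progressbar(range(100), desc='accumulating'):
        total += i

    # Range-like convenience wrapper; show_progressbar=False silences the bar.
    for i in util.progressbar_range(10, show_progressbar=True):
        pass
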
pyproject.toml

Lines changed: 3 additions & 6 deletions

@@ -39,19 +39,17 @@ bloch_sphere_visualization = [
     "filter_functions[plotting]",
     "qutip",
 ]
-fancy_progressbar = [
-    "ipynbname",
-    "jupyter"
-]
 doc = [
     "jupyter",
     "nbsphinx",
     "numpydoc",
     "sphinx",
     "sphinx_rtd_theme",
+    "sphinxcontrib-apidoc",
     "ipympl",
     "qutip-qip",
-    "qutip-qtrl"
+    "qutip-qtrl",
+    "numpy<2"
 ]
 tests = [
     "pytest >= 4.6",

@@ -61,7 +59,6 @@ tests = [
 all = [
     "filter_functions[plotting]",
     "filter_functions[bloch_sphere_visualization]",
-    "filter_functions[fancy_progressbar]",
     "filter_functions[doc]",
     "filter_functions[tests]",
 ]
