Commit 84f3d312 authored by Tobias Hangleiter's avatar Tobias Hangleiter
Browse files

Make tensor more flexible and fix imports

parent e2af6558
......@@ -6,8 +6,8 @@ Functions
:meth:`abs2` :
Fast absolute value squared
:meth:`tensor`
Fast tensor product of an arbitrary number of inputs using
:meth:`~numpy.einsum`
Fast, flexible tensor product of an arbitrary number of inputs using
:meth:`~numpy.einsum` that supports broadcasting
:meth:`mdot`
Multiple matrix product along an axis
:meth:`ptrace`
......@@ -28,12 +28,12 @@ Functions
import operator
import string
from functools import reduce
from itertools import zip_longest
from typing import List, Sequence, Tuple, Union
import numpy as np
import qutip as qt
from numpy import ndarray
from numpy.linalg import norm
from numpy import linalg, ndarray
from .qi import P_np
......@@ -78,20 +78,21 @@ if numba:
def tensor(*args, rank: int = 2, optimize: Union[bool, str] = False):
r"""
Fast tensor product using einsum. The product is taken over the last *rank*
axes (the exception being ``rank == 1`` since a column vector is also
represented as a 2d-array) and broadcast over the remaining axes which thus
need to be of the same shape.
"""
Fast, flexible tensor product using einsum. The product is taken over the
last *rank* axes (the exception being ``rank == 1`` since a column vector
is also represented as a 2d-array) and broadcast over the remaining axes
which thus need to follow numpy broadcasting rules.
For example, the following shapes are compatible.
- for matrices (``rank == 2``)::
(a, b, c, d, d), (a, b, c, e, e) -> (a, b, c, d*e, d*e)
(a, b, c), (a, d, e) -> (a, b*d, c*e)
(a, b), (c, d, e) -> (c, a*d, b*e)
- for vectors (``rank == 1``)::
(a, b, c, d), (a, b, c, e) -> (a, b, c, d*e)
(a, b, 1, d), (a, 1, c, e) -> (a, b, c, d*e)
(a, b), (a, c) -> (a, b*c)
(a, b, 1), (a, c) -> (a, b, c)
......@@ -105,7 +106,7 @@ def tensor(*args, rank: int = 2, optimize: Union[bool, str] = False):
remaining axes are broadcast over.
optimize : bool|str, optional
Optimize the tensor contraction order. Passed through to
:meth:`oe.contract`.
:meth:`numpy.einsum`.
Examples
--------
......@@ -126,26 +127,32 @@ def tensor(*args, rank: int = 2, optimize: Union[bool, str] = False):
True
>>> A, B = np.random.randn(1, 3), np.random.randn(3, 4)
>>> result = tensor(A, B)
>>> result.shape == (1*3, 3*4)
True
>>> A, B = np.random.randn(3, 1, 2), np.random.randn(2, 2, 2)
>>> try:
... result = tensor(A, B, rank=1)
... except ValueError as err:
... result = tensor(A, B, rank=2)
... except ValueError as err: # cannot broadcast over axis 0
... print(err)
Require all args to have the same shape except on the last 1 axes.
>>> result = tensor(A, B, rank=2)
>>> result.shape == (1*3, 3*4)
Incompatible shapes. Could not compute tensor(tensor(*args[:1], rank=2),
args[1], rank=2) with shapes (3, 1, 2) and (2, 2, 2).
>>> result = tensor(A, B, rank=3)
>>> result.shape == (3*2, 1*2, 2*2)
True
See Also
--------
:meth:`numpy.kron`
"""
# Squeeze out extra dimension of vectors
args = [arg[None, :] if arg.ndim == 1 else arg for arg in args]
if all(1 in arg.shape[-2:] for arg in args) and rank == 1:
if rank == 1 and all(1 in arg.shape[-2:] for arg in args):
# Vectors, but numpy arrays are still two-dimensional
rank += 1
if len(set(arg.shape[:-rank] for arg in args)) != 1:
raise ValueError('Require all args to have the same shape except ' +
'on the last {} axes.'.format(rank))
chars = string.ascii_lowercase + string.ascii_uppercase
# All the subscripts we need
A_chars = chars[:rank]
......@@ -154,20 +161,59 @@ def tensor(*args, rank: int = 2, optimize: Union[bool, str] = False):
A_chars, B_chars, ''.join(i + j for i, j in zip(A_chars, B_chars))
)
def get_outshape(A, B):
    """Return the output shape of the tensor product of A and B.

    The leading (non-tensored) axes are combined with numpy
    broadcasting rules; the last *rank* axes are multiplied pairwise
    since the Kronecker product collapses each axis pair into one
    axis of their product. Raises a bare ValueError on incompatible
    leading axes; the caller turns it into a descriptive message.
    """
    lead_A = A.shape[:-rank]
    lead_B = B.shape[:-rank]
    broadcast = []
    # Walk the leading axes right-to-left, padding the shorter shape
    # with 1's so the shapes align like numpy broadcasting does.
    for d_a, d_b in zip_longest(reversed(lead_A), reversed(lead_B),
                                fillvalue=1):
        if d_a == d_b or 1 in (d_a, d_b):
            # Equal dims stay; a 1-dim broadcasts to the other dim.
            broadcast.append(max(d_a, d_b))
        else:
            # Incompatible: defer the error message to the wrapper.
            raise ValueError
    # Tensored axes multiply elementwise.
    kron_shape = tuple(d_a*d_b
                       for d_a, d_b in zip(A.shape[-rank:], B.shape[-rank:]))
    return tuple(reversed(broadcast)) + kron_shape
def binary_tensor(A, B):
    """Compute the Kronecker product of the two tensors A and B.

    The product is taken over the last *rank* axes (closure variable);
    the remaining leading axes are broadcast over. The einsum result is
    reshaped so that each pair of tensored axes collapses into a single
    axis of their product.
    """
    if optimize:
        # Precompute a contraction path for the requested strategy and
        # reuse it for the actual contraction below.
        path, _ = np.einsum_path(subscripts, A, B, optimize=optimize)
    else:
        path = False
    # May raise ValueError for incompatible leading shapes; the wrapper
    # converts that into a meaningful error message.
    outshape = get_outshape(A, B)
    return np.einsum(subscripts, A, B, optimize=path).reshape(outshape)
return reduce(binary_tensor, args)
def binary_tensor_wrapper(A, B):
"""Wrap binary_tensor to count function calls and catch Exceptions"""
try:
binary_tensor.calls += 1
result = binary_tensor(A, B)
except ValueError:
raise ValueError(
'Incompatible shapes. Could not compute tensor(tensor(*' +
'args[:{0}], rank={1}), args[{0}], rank={1}) '.format(
binary_tensor.calls, rank) +
'with shapes {} and {}.'.format(A.shape, B.shape)
)
return result
# Initialize function call counter to zero
binary_tensor.calls = 0
return reduce(binary_tensor_wrapper, args)
def mdot(arr: Sequence, axis: int = 0) -> ndarray:
......@@ -251,7 +297,7 @@ def pauli_expm(a: Sequence, nonzero_ind: List[bool]) -> ndarray:
P = np.asarray(P_np)
idx = np.asarray(nonzero_ind)
# Twice the rotation angle
theta = norm(a, axis=0)
theta = linalg.norm(a, axis=0)
# Rotation axis
n = a/theta
# Replace nans originating from divide by zero
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment