Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Request For Comment: Arithmetic between Operators and LazyOperators #86

Merged
merged 5 commits into from
Apr 24, 2023
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions src/QuantumOpticsBase.jl
Original file line number Diff line number Diff line change
Expand Up @@ -80,5 +80,6 @@ include("metrics.jl")
include("spinors.jl")
include("phasespace.jl")
include("printing.jl")
#include("operators_lazyaritmetic.jl")
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this probably should be deleted before merging

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Oh yeah... Done that now ;)


end # module
30 changes: 30 additions & 0 deletions src/operators_lazyproduct.jl
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,26 @@ isequal(x::LazyProduct{B1,B2}, y::LazyProduct{B1,B2}) where {B1,B2} = (samebases
# Arithmetic operations
-(a::T) where T<:LazyProduct = T(a.operators,a.ket_l,a.bra_r, -a.factor)


# Mixed addition/subtraction involving LazyProduct stays lazy: promote the
# LazyProduct to a single-term LazySum and delegate to LazySum arithmetic.
# Both operands must share left/right bases (enforced by dispatch on B1, B2).
+(a::LazyProduct{B1,B2}, b::Operator{B1,B2}) where {B1,B2} = LazySum(a) + b
+(a::Operator{B1,B2}, b::LazyProduct{B1,B2}) where {B1,B2} = +(b, a)
-(a::LazyProduct{B1,B2}, b::LazyProduct{B1,B2}) where {B1,B2} = LazySum(a) - b
+(a::LazyProduct{B1,B2}, b::LazyProduct{B1,B2}) where {B1,B2} = LazySum(a) + LazySum(b)
-(a::LazyProduct{B1,B2}, b::Operator{B1,B2}) where {B1,B2} = LazySum(a) - b
-(a::Operator{B1,B2}, b::LazyProduct{B1,B2}) where {B1,B2} = a - LazySum(b)

*(a::LazyProduct{B1,B2}, b::LazyProduct{B2,B3}) where {B1,B2,B3} = LazyProduct((a.operators..., b.operators...), a.factor*b.factor)
*(a::LazyProduct, b::Number) = LazyProduct(a.operators, a.factor*b)
*(a::Number, b::LazyProduct) = LazyProduct(b.operators, a*b.factor)
Expand All @@ -74,6 +94,16 @@ permutesystems(op::LazyProduct, perm::Vector{Int}) = LazyProduct(([permutesystem
identityoperator(::Type{LazyProduct}, ::Type{S}, b1::Basis, b2::Basis) where S<:Number = LazyProduct(identityoperator(S, b1, b2))


#Assume same basis
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What does "assume same basis" means here? Is it something that would lead to bugs if not followed? Can we have tests for it, even if the test is basically just checking that an error is thrown instead of silently continuing with the operation?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What I meant is that BL and BR are the same since I'm not sure the methods defined would work if one considered operators with BL != BR. This is already handled by the dispatch, and the user should just get an error saying that addition for these types of operators is not defined. I will look into this and create specific tests for it.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

BL == BR is no longer assumed and code is updated accordingly

# Tensor product of a square Operator with a LazyProduct (and vice versa).
# The operator is attached to the first factor of the product; every other
# factor is padded with an identity on the operator's basis, so the result
# remains a LazyProduct with the same scalar factor.
function tensor(a::Operator{B1,B1}, b::LazyProduct{B, B, F, T, KTL, BTR}) where {B1,B, F, T, KTL, BTR}
    padded = Tuple((k == 1 ? a : identityoperator(a)) ⊗ o for (k, o) in enumerate(b.operators))
    return LazyProduct(padded, b.factor)
end
function tensor(a::LazyProduct{B, B, F, T, KTL, BTR}, b::Operator{B1,B1}) where {B1,B, F, T, KTL, BTR}
    padded = Tuple(o ⊗ (k == 1 ? b : identityoperator(b)) for (k, o) in enumerate(a.operators))
    return LazyProduct(padded, a.factor)
end

function mul!(result::Ket{B1},a::LazyProduct{B1,B2},b::Ket{B2},alpha,beta) where {B1,B2}
if length(a.operators)==1
mul!(result,a.operators[1],b,a.factor*alpha,beta)
Expand Down
13 changes: 13 additions & 0 deletions src/operators_lazysum.jl
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,19 @@ function /(a::LazySum, b::Number)
@samebases LazySum(a.basis_l, a.basis_r, factors, a.operators)
end

# Tensor product of an Operator with a LazySum (and vice versa) distributes
# over the sum's terms, keeping the result lazy. The factors are reused
# unchanged; only the operators and joint bases are rebuilt.
function tensor(a::Operator, b::LazySum)
    bl = a.basis_l ⊗ b.basis_l
    br = a.basis_r ⊗ b.basis_r
    terms = Tuple(a ⊗ o for o in b.operators)
    return LazySum(bl, br, b.factors, terms)
end
function tensor(a::LazySum, b::Operator)
    bl = a.basis_l ⊗ b.basis_l
    br = a.basis_r ⊗ b.basis_r
    terms = Tuple(o ⊗ b for o in a.operators)
    return LazySum(bl, br, a.factors, terms)
end

function dagger(op::LazySum)
ops = dagger.(op.operators)
@samebases LazySum(op.basis_r, op.basis_l, conj.(op.factors), ops)
Expand Down
48 changes: 37 additions & 11 deletions src/operators_lazytensor.jl
Original file line number Diff line number Diff line change
Expand Up @@ -96,22 +96,48 @@ isequal(x::LazyTensor, y::LazyTensor) = samebases(x,y) && isequal(x.indices, y.i
# Arithmetic operations
-(a::LazyTensor) = LazyTensor(a, -a.factor)

function +(a::LazyTensor{B1,B2}, b::LazyTensor{B1,B2}) where {B1,B2}
if length(a.indices) == 1 && a.indices == b.indices
op = a.operators[1] * a.factor + b.operators[1] * b.factor
return LazyTensor(a.basis_l, a.basis_r, a.indices, (op,))
Copy link
Collaborator

@amilsted amilsted Apr 12, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is the only place I am not sure about defaulting to laziness. It's quite a special case, but I encounter it quite a bit. I suppose the reason to do lazy summing here is mainly to be consistent with the laziness-preserving principle. I have some code that makes use of the existing behavior, but of course I can still do this kind of concrete summing manually if I want to, so I'm not arguing hard to keep it. What are your thoughts?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

My experience was that the previous implementation was very limiting. Especially since the custom operators I have been playing around with were not DataOperators but AbstractOperators, where the operation was defined via a function rather than a matrix. Therefore, these cannot be trivially added (except by using LazySum), and the above implementation fails. Also, length(a.indices) ==1 is required, and I could imagine situations where one would like to be able to add LazyTensors containing more than one operator.

However, one could perhaps keep the original behavior by dispatching on LazyTensors containing only one DataOperator. That is adding a function like this (draft, I'm not entirely sure it works):

const single_dataoperator{B1,B2} = LazyTensor{B1,B2,ComplexF64,Vector{Int64},Tuple{T}} where {B1,B2,T<:DataOperator} 
function +(a::T1,b::T2) where {T1 <: single_dataoperator{B1,B2},T2 <: single_dataoperator{B1,B2}}
    if length(a.indices) == 1 && a.indices == b.indices
        op = a.operators[1] * a.factor + b.operators[1] * b.factor
        return LazyTensor(a.basis_l, a.basis_r, a.indices, (op,))
    end
    throw(ArgumentError("Addition of LazyTensor operators is only defined in case both operators act nontrivially on the same, single tensor factor."))
end

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@mabuni1998 I think it's worth trying to keep the original intact as you suggest. If we can handle it via dispatch, we won't lose anything. Or am I missing some case here?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Copy link
Collaborator

@amilsted amilsted Apr 14, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for finding a way to keep the original behavior. This is not type-stable, but I can't think of an obvious way to make it otherwise, except by letting LazyTensor indices be type parameters. Maybe it doesn't matter too much, as this will not typically be performance-critical?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Probably won't be performance-critical no, as you are most likely creating the operators once at the beginning of the simulation and then not changing them as you do multiplications etc.

end
throw(ArgumentError("Addition of LazyTensor operators is only defined in case both operators act nontrivially on the same, single tensor factor."))
# Addition of two LazyTensor operators stays lazy: wrap both terms in a
# LazySum instead of materializing the sum.
# (Removed non-code page residue that was embedded inside the body.)
function +(a::LazyTensor{B1,B2},b::LazyTensor{B1,B2}) where {B1,B2}
    return LazySum(a, b)
end
# Subtraction and mixed addition/subtraction involving LazyTensor stay lazy:
# the LazyTensor is promoted to a (signed) LazySum and the computation is
# delegated to LazySum arithmetic.
-(a::LazyTensor{B1,B2}, b::LazyTensor{B1,B2}) where {B1,B2} = LazySum((1, -1), (a, b))
+(a::LazyTensor{B1,B2}, b::Operator{B1,B2}) where {B1,B2} = LazySum(a) + b
+(a::Operator{B1,B2}, b::LazyTensor{B1,B2}) where {B1,B2} = +(b, a)
-(a::LazyTensor{B1,B2}, b::Operator{B1,B2}) where {B1,B2} = LazySum(a) - b
-(a::Operator{B1,B2}, b::LazyTensor{B1,B2}) where {B1,B2} = a - LazySum(b)

function -(a::LazyTensor{B1,B2}, b::LazyTensor{B1,B2}) where {B1,B2}
if length(a.indices) == 1 && a.indices == b.indices
op = a.operators[1] * a.factor - b.operators[1] * b.factor
return LazyTensor(a.basis_l, a.basis_r, a.indices, (op,))
# Tensor product of a square LazyTensor with a square Operator appended on
# the right. Three cases:
#  * `b` equals the identity on its basis: only the joint basis grows; the
#    existing indices/operators/factor are reused unchanged, since `b` just
#    adds trailing identity factors.
#  * `b` acts on a CompositeBasis (and is not the identity): not implemented.
#  * otherwise: wrap `b` as a single-factor LazyTensor and recurse via ⊗.
function tensor(a::LazyTensor{B1,B1},b::Operator{B2,B2}) where {B1,B2}
if isequal(b,identityoperator(basis(b)))
btotal = basis(a) ⊗ basis(b)
LazyTensor(btotal,btotal,a.indices,(a.operators...,),a.factor)
elseif B2 <: CompositeBasis
throw(ArgumentError("tensor(a::LazyTensor{B1,B1},b::Operator{B2,B2}) is not implemented for B2 being CompositeBasis "))
else
a ⊗ LazyTensor(b.basis_l,b.basis_r,[1],(b,),1)
end
end
# Tensor product of a square Operator prepended on the left of a square
# LazyTensor. Mirror of tensor(::LazyTensor, ::Operator):
#  * `a` equals the identity on its basis: shift the LazyTensor's indices by
#    the number of factors in `a`'s basis and keep operators/factor as-is.
#  * `a` acts on a CompositeBasis (and is not the identity): not implemented.
#  * otherwise: wrap `a` as a single-factor LazyTensor and recurse via ⊗.
# (Removed a stray leftover `throw` line from the old subtraction method that
# made this function always raise an ArgumentError.)
function tensor(a::Operator{B1,B1},b::LazyTensor{B2,B2}) where {B1,B2}
    if isequal(a,identityoperator(basis(a)))
        btotal = basis(a) ⊗ basis(b)
        LazyTensor(btotal,btotal,b.indices.+length(basis(a).shape),(b.operators...,),b.factor)
    elseif B1 <: CompositeBasis
        throw(ArgumentError("tensor(a::Operator{B1,B1},b::LazyTensor{B2,B2}) is not implemented for B1 being CompositeBasis "))
    else
        LazyTensor(a.basis_l,a.basis_r,[1],(a,),1) ⊗ b
    end
end



function *(a::LazyTensor{B1,B2}, b::LazyTensor{B2,B3}) where {B1,B2,B3}
indices = sort(union(a.indices, b.indices))
# ops = Vector{AbstractOperator}(undef, length(indices))
Expand Down
10 changes: 6 additions & 4 deletions test/test_abstractdata.jl
Original file line number Diff line number Diff line change
Expand Up @@ -412,9 +412,9 @@ x2 = Ket(b_r, rand(ComplexF64, length(b_r)))
xbra1 = Bra(b_l, rand(ComplexF64, length(b_l)))
xbra2 = Bra(b_l, rand(ComplexF64, length(b_l)))

# Addition of LazyTensor now returns a LazySum, so `op1 ± op2` no longer
# throws; the old `@test_throws ArgumentError` checks were removed.
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Before merge I would suggest deleting the commented-out tests. They would not be informative to future readers, long after this merge. Future readers should use git blame if they want to know why a change was made.

Could you add tests for the failure modes of this contribution? E.g., @test_throws for where your code specifically raises an error to warn about something it can not do. And are there any other implicit assumptions about shapes and bases and other things? We do not want the user to obtain nonsense results if they do not happen to know that some combination of operations is not supported.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Updated code to no longer assume anything about BL == BR and added test to ensure that errors are thrown if addition between operators is not allowed.

#@test_throws ArgumentError op1 + op2
#@test_throws ArgumentError op1 - op2
@test D(-op1_, -op1, 1e-12)

# Test multiplication
Expand Down Expand Up @@ -448,7 +448,9 @@ xbra1 = Bra(b_l, rand(ComplexF64, length(b_l)))
xbra2 = Bra(b_l, rand(ComplexF64, length(b_l)))

# Addition of LazyProduct now returns a LazySum and is therefore allowed
# (the old `@test_throws ArgumentError op1 + op2` check was removed).
@test D(2.1*op1 + 0.3*op2, 2.1*op1_+0.3*op2_)
@test D(-op1_, -op1)

# Test multiplication
Expand Down
28 changes: 26 additions & 2 deletions test/test_operators_lazyproduct.jl
Original file line number Diff line number Diff line change
Expand Up @@ -62,9 +62,22 @@ x2 = Ket(b_l, rand(ComplexF64, length(b_l)))
xbra1 = Bra(b_l, rand(ComplexF64, length(b_l)))
xbra2 = Bra(b_l, rand(ComplexF64, length(b_l)))

# Addition between LazyProduct operators now returns a LazySum, so it is
# allowed (the old `@test_throws ArgumentError op1 + op2` was removed).

# Test addition: lazy results must match the dense equivalents.
@test 1e-14 > D(-op1_, -op1)
@test 1e-14 > D(op1+op2, op1_+op2_)
@test 1e-14 > D(op1+op2_, op1_+op2_)
@test 1e-14 > D(op1_+op2, op1_+op2_)

# Test subtraction, including the equivalent negation/scaling forms.
@test 1e-14 > D(op1 - op2, op1_ - op2_)
@test 1e-14 > D(op1 - op2_, op1_ - op2_)
@test 1e-14 > D(op1_ - op2, op1_ - op2_)
@test 1e-14 > D(op1 + (-op2), op1_ - op2_)
@test 1e-14 > D(op1 + (-1*op2), op1_ - op2_)


# Test multiplication
@test_throws DimensionMismatch op1a*op1a
Expand All @@ -78,6 +91,17 @@ xbra2 = Bra(b_l, rand(ComplexF64, length(b_l)))
# Test division
@test 1e-14 > D(op1/7, op1_/7)

# Test tensor product (NOTE(review): it is assumed that BL == BR of both
# involved operators — confirm this still holds after the basis generalization).
op1_l = randoperator(b_l, b_l)
op2_l = randoperator(b_l, b_l)
op_r = randoperator(b_r, b_r)
# Operator ⊗ LazyProduct must agree with the fully dense tensor product.
op = op_r ⊗ LazyProduct([op1_l, sparse(op2_l)])*0.1
op_ = op_r ⊗ (0.1*op1_l*op2_l)
@test 1e-11 > D(op,op_)
# LazyProduct ⊗ Operator (mirror case).
op = LazyProduct([op1_l, sparse(op2_l)])*0.1 ⊗ op_r
op_ = (0.1*op1_l*op2_l) ⊗ op_r
@test 1e-11 > D(op,op_)

# Test identityoperator
Idense = identityoperator(DenseOpType, b_l)
id = identityoperator(LazyProduct, b_l)
Expand Down
9 changes: 9 additions & 0 deletions test/test_operators_lazysum.jl
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,15 @@ xbra1 = Bra(b_l, rand(ComplexF64, length(b_l)))
# Test division
@test 1e-14 > D(op1/7, op1_/7)

# Test tensor: Operator ⊗ LazySum and LazySum ⊗ Operator must both match
# their dense counterparts. (Fixed: op2_tensor/op2_tensor_ were built but
# never asserted — the second @test merely repeated the first with swapped
# arguments.)
op1_tensor = op1a ⊗ op1
op2_tensor = op1 ⊗ op1a
op1_tensor_ = op1a ⊗ op1_
op2_tensor_ = op1_ ⊗ op1a
@test 1e-14 > D(op1_tensor,op1_tensor_)
@test 1e-14 > D(op2_tensor,op2_tensor_)


# Test tuples vs. vectors
@test (op1+op1).operators isa Tuple
@test (op1+op2).operators isa Tuple
Expand Down
54 changes: 51 additions & 3 deletions test/test_operators_lazytensor.jl
Original file line number Diff line number Diff line change
Expand Up @@ -113,11 +113,25 @@ fac = randn()
@test dense(op3 + fac * op3) ≈ dense(op3) * (1 + fac)
@test dense(op3 - fac * op3) ≈ dense(op3) * (1 - fac)

# Addition between LazyTensors now returns a LazySum and is therefore always
# allowed; the former forbidden-addition @test_throws checks were removed.
@test 1e-14 > D(-op1_, -op1)

# Test addition against the dense equivalents.
@test 1e-14 > D(op1+op2, op1_+op2_)
@test 1e-14 > D(op1+op2_, op1_+op2_)
@test 1e-14 > D(op1_+op2, op1_+op2_)

# Test subtraction, including the equivalent negation/scaling forms.
@test 1e-14 > D(op1 - op2, op1_ - op2_)
@test 1e-14 > D(op1 - op2_, op1_ - op2_)
@test 1e-14 > D(op1_ - op2, op1_ - op2_)
@test 1e-14 > D(op1 + (-op2), op1_ - op2_)
@test 1e-14 > D(op1 + (-1*op2), op1_ - op2_)


# Test multiplication
@test_throws DimensionMismatch op1*op2
@test 1e-11 > D(op1*(x1 + 0.3*x2), op1_*(x1 + 0.3*x2))
Expand Down Expand Up @@ -413,4 +427,38 @@ Lop1 = LazyTensor(b1^2, b2^2, 2, sparse(randoperator(b1, b2)))
@test_throws DimensionMismatch Lop1*dense(Lop1)
@test_throws DimensionMismatch Lop1*sparse(Lop1)


# Test tensor products with LazyTensor (only square operators, bl == br, are
# supported). (Fixed twice below: the op2_tensor pairs were constructed but
# never asserted — the second @test repeated the first with swapped arguments.)
subop1 = randoperator(b1a, b1a)
subop2 = randoperator(b2a, b2a)
subop3 = randoperator(b3a, b3a)
I1 = dense(identityoperator(b1a, b1a))
I2 = dense(identityoperator(b2a, b2a))
I3 = dense(identityoperator(b3a, b3a))
op1 = LazyTensor(b_l, b_l, [1, 3], (subop1, sparse(subop3)), 0.1)
op1_ = 0.1*subop1 ⊗ I2 ⊗ subop3
op2 = LazyTensor(b_l, b_l, [2, 3], (sparse(subop2), subop3), 0.7)
op2_ = 0.7*I1 ⊗ subop2 ⊗ subop3
op3 = 0.3*LazyTensor(b_l, b_l, 3, subop3)
op3_ = 0.3*I1 ⊗ I2 ⊗ subop3

# Cannot tensor a non-identity operator on a CompositeBasis with a LazyTensor:
@test_throws ArgumentError op1 ⊗ op1_
@test_throws ArgumentError op2_ ⊗ op2

# Operator ⊗ LazyTensor and LazyTensor ⊗ Operator must match the dense result.
op1_tensor = subop1 ⊗ op1
op1_tensor_ = subop1 ⊗ op1_
op2_tensor = op1 ⊗ subop1
op2_tensor_ = op1_ ⊗ subop1
@test 1e-14 > D(op1_tensor,op1_tensor_)
@test 1e-14 > D(op2_tensor,op2_tensor_)

# Tensoring with an identity on a composite basis is supported on either side.
op1_tensor = (I1 ⊗ I3) ⊗ op1
op1_tensor_ = (I1 ⊗ I3) ⊗ op1_
op2_tensor = op1 ⊗ (I1 ⊗ I3)
op2_tensor_ = op1_ ⊗ (I1 ⊗ I3)
@test 1e-14 > D(op1_tensor,op1_tensor_)
@test 1e-14 > D(op2_tensor,op2_tensor_)


end # testset