Merge pull request #5 from tensor4all/bump-to-v0.5.4
Bump to v0.5.4
shinaoka authored Dec 6, 2024
2 parents f667e4f + ac394a1 commit adb5abf
Showing 5 changed files with 46 additions and 3 deletions.
2 changes: 1 addition & 1 deletion Project.toml
@@ -1,7 +1,7 @@
name = "PartitionedMPSs"
uuid = "17ce1de9-5131-45e3-8a48-9723b6e2dc23"
authors = ["Hiroshi Shinaoka <[email protected]> and contributors"]
version = "0.5.3"
version = "0.5.4"

[deps]
Coverage = "a2441757-f6aa-5fb2-8edb-039e3f45d037"
20 changes: 18 additions & 2 deletions src/partitionedmps.jl
@@ -112,6 +112,21 @@ Add two PartitionedMPS objects.
If the two objects have the same projectors in the same order, the resulting PartitionedMPS will have the same projectors in the same order.
By default, we use the `directsum` algorithm to compute the sum, and no truncation is performed.
"""
function Base.:+(
    partmpss::PartitionedMPS...;
    alg="directsum",
    cutoff=0.0,
    maxdim=typemax(Int),
    coeffs=ones(length(partmpss)),
    kwargs...,
)::PartitionedMPS
    result = PartitionedMPS()
    for (coeff, partmps) in zip(coeffs, partmpss)
        result = +(result, coeff * partmps; alg, cutoff, maxdim, kwargs...)
    end
    return result
end
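
A minimal usage sketch of this varargs method, assuming the `PartitionedMPS`/`Projector`/`project` API as it appears elsewhere in this diff; the ITensors imports and the `random_mps` call are illustrative assumptions, not part of the change:

using ITensors, ITensorMPS
using PartitionedMPSs: PartitionedMPS, Projector, project

# Illustrative setup: two sub-MPSs projected onto different values of sites[1].
sites = [Index(2, "x=$n") for n in 1:3]
Ψ = random_mps(sites)
a = PartitionedMPS(project(Ψ, Projector(sites[1] => 1)))
b = PartitionedMPS(project(Ψ, Projector(sites[1] => 2)))

# One call sums any number of PartitionedMPS objects; `coeffs` scales each term.
# With the default alg="directsum", no truncation is performed.
c = +(a, b; coeffs=[2.0, 0.5])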

function Base.:+(
    a::PartitionedMPS,
    b::PartitionedMPS;
@@ -182,6 +197,7 @@ function truncate(
    cutoff=default_cutoff(),
    maxdim=default_maxdim(),
    use_adaptive_weight=true,
    maxrefinement=4,
    kwargs...,
)::PartitionedMPS
    norm2 = [LinearAlgebra.norm(v)^2 for v in values(obj)]
@@ -190,7 +206,7 @@

    compressed = obj

-    while true
+    for _ in 1:maxrefinement
        compressed = PartitionedMPS([
            truncate(v; cutoff=cutoff * w, maxdim, kwargs...) for
            (v, w) in zip(values(obj), weights)
@@ -200,7 +216,7 @@
            break
        end

-        weights .*= cutoff / actual_error # Adjust weights
+        weights .*= min(cutoff / actual_error, 0.5) # Adjust weights
    end

    return compressed
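
Below is a self-contained toy sketch of the control flow this hunk changes: the former `while true` refinement loop now makes at most `maxrefinement` passes, and each pass shrinks the per-partition cutoff weights by at least a factor of 2 (the `min(..., 0.5)` guard), so a barely-missed error target cannot produce a vanishingly small adjustment. The vector-based "truncation" here is purely illustrative, not the package's MPS truncation:

# Toy sketch (not package code): bounded adaptive-weight truncation on vectors,
# where "truncating" a partition drops entries below its weighted cutoff.
function adaptive_truncate(parts::Vector{Vector{Float64}}, cutoff::Float64;
                           maxrefinement::Int=4)
    weights = ones(length(parts))
    compressed = parts
    for _ in 1:maxrefinement                 # was: while true
        compressed = [filter(x -> abs(x) >= cutoff * w, p)
                      for (p, w) in zip(parts, weights)]
        total = sum(p -> sum(abs, p; init=0.0), parts)
        kept = sum(c -> sum(abs, c; init=0.0), compressed)
        actual_error = (total - kept) / total
        actual_error <= cutoff && break
        # Shrink each weight by at least a factor of 2 per pass.
        weights .*= min(cutoff / actual_error, 0.5)
    end
    return compressed
end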
1 change: 1 addition & 0 deletions src/projector.jl
@@ -43,6 +43,7 @@ end
projectedinds(p::Projector) = p.data

Base.getindex(p::Projector, inds) = p.data[inds]
Base.getindex(p::Projector, ind, default) = get(p.data, ind, default)
Base.keys(p::Projector) = keys(p.data)

function Base.iterate(obj::Projector, state)
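
The three-argument `getindex` added above behaves like `get(dict, key, default)`: it returns the value an index is projected onto, or the given default when the index is unprojected. A hedged sketch of the intended use (`Index` and the `Projector(pair)` constructor follow their usage in this diff; exact behavior depends on `p.data`):

using ITensors
using PartitionedMPSs: Projector

s = Index(4, "x=1")
t = Index(4, "x=2")
p = Projector(s => 2)  # s is projected onto value 2; t is left unprojected

p[s]     # 2, via getindex(p, s)
p[t, 0]  # 0, the fallback default, since t has no entry in p.data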
12 changes: 12 additions & 0 deletions src/subdomainmps.jl
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,18 @@ function project(projΨ::SubDomainMPS, projector::Projector)::Union{Nothing,SubD
    )
end

function project(
    projΨ::SubDomainMPS, pairs::Vararg{Pair{Index{T},Int}}
)::Union{Nothing,SubDomainMPS} where {T}
    return project(projΨ, Projector(pairs...))
end

function project(
    Ψ::AbstractMPS, pairs::Vararg{Pair{Index{T},Int}}
)::Union{Nothing,SubDomainMPS} where {T}
    return project(Ψ, Projector(pairs...))
end
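
The two new overloads above are convenience sugar: they build the `Projector` from `Index => value` pairs inline, so callers can project an `MPS` or `SubDomainMPS` without constructing the projector themselves. A hedged example (the site setup and `random_mps` from ITensorMPS.jl are illustrative assumptions):

using ITensors, ITensorMPS
using PartitionedMPSs: Projector, project

sites = [Index(2, "x=$n") for n in 1:3]
Ψ = random_mps(sites)

# Equivalent to project(Ψ, Projector(sites[1] => 1, sites[2] => 2)):
sub = project(Ψ, sites[1] => 1, sites[2] => 2)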

function project(Ψ::AbstractMPS, projector::Projector)::Union{Nothing,SubDomainMPS}
    return project(SubDomainMPS(Ψ), projector)
end
14 changes: 14 additions & 0 deletions test/partitionedmps_tests.jl
@@ -65,6 +65,20 @@ import PartitionedMPSs: PartitionedMPSs, Projector, project, SubDomainMPS, Parti
    @test MPS((a + b) + 2 * (b + a)) ≈ 3 * Ψ rtol = 1e-13
end

@testset "add" begin
Random.seed!(1234)
N = 3
d = 10
sites = [Index(d, "x=$n") for n in 1:N]
Ψ = MPS(collect(_random_mpo([[s] for s in sites])))

projectors = [Projector(sites[1] => d_) for d_ in 1:d]
coeffs = rand(length(projectors))
prjΨs = [project(Ψ, p) for p in projectors]

@test MPS(+([PartitionedMPS(x) for x in prjΨs]...; coeffs=coeffs)) +([c * MPS(x) for (c, x) in zip(coeffs, prjΨs)]...; alg="directsum")
end

@testset "truncate" begin
    for seed in [1, 2, 3, 4, 5]
        Random.seed!(seed)
