From db07bdcc9f28fc3b89a9c816d780416095e487da Mon Sep 17 00:00:00 2001 From: naezzell Date: Tue, 6 Aug 2024 18:05:00 -0700 Subject: [PATCH] preprint v1 release --- .gitignore | 61 + LICENSE | 201 +++ PMRQMC.cpp | 71 ++ PMRQMC_mpi.cpp | 130 ++ README.md | 24 + clean.sh | 18 + correlator_PMRQMC.cpp | 71 ++ .../zhang_prl_model_correlator_driver.py | 137 ++ correlator_mainqmc.hpp | 1107 +++++++++++++++++ divdiff.hpp | 297 +++++ .../zhang_prl_model_fidsus_driver.py | 149 +++ mainqmc.hpp | 1107 +++++++++++++++++ prepare.cpp | 1002 +++++++++++++++ utils/_pauli_manipulations_test.py | 309 +++++ utils/exact_calculations.py | 165 +++ utils/ioscripts.py | 238 ++++ utils/pauli_manipulations.py | 377 ++++++ utils/rng_seeds.txt | 1 + 18 files changed, 5465 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 PMRQMC.cpp create mode 100644 PMRQMC_mpi.cpp create mode 100644 README.md create mode 100755 clean.sh create mode 100644 correlator_PMRQMC.cpp create mode 100644 correlator_experiments/zhang_prl_model_correlator_driver.py create mode 100644 correlator_mainqmc.hpp create mode 100644 divdiff.hpp create mode 100644 fidsus_experiments/zhang_prl_model_fidsus_driver.py create mode 100644 mainqmc.hpp create mode 100644 prepare.cpp create mode 100644 utils/_pauli_manipulations_test.py create mode 100644 utils/exact_calculations.py create mode 100644 utils/ioscripts.py create mode 100644 utils/pauli_manipulations.py create mode 100644 utils/rng_seeds.txt diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..8eba1ef --- /dev/null +++ b/.gitignore @@ -0,0 +1,61 @@ +# Prerequisites +*.d + +# Compiled Object files +*.slo +*.lo +*.o +*.obj + +# Precompiled Headers +*.gch +*.pch + +# Compiled Dynamic libraries +*.so +*.dylib +*.dll + +# Fortran module files +*.mod +*.smod + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib +*.bin + +# Executables +*.exe +*.out +*.app + +# Hamiltonian files +H.txt +O.txt +hamiltonian.hpp +parameters.hpp + +# submit scripts +job_file.sh + +# temp data files +*temp.txt +*temp_data* +temp_fidsus_data_zhang_prl_rot_model.txt +temp_correlator_data_zhang_prl_rot_model.txt + +# python cache +*__pycache__* +*.ipynb_checkpoints* + +# figures, for now +data_and_plotting/plot_scripts/figures/* + +# vs code things +*vscode* + +# old analysis ideas +*old_analysis_ideas* diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/PMRQMC.cpp b/PMRQMC.cpp new file mode 100644 index 0000000..a39be39 --- /dev/null +++ b/PMRQMC.cpp @@ -0,0 +1,71 @@ +// +// This program implements estimators for the titular quantities in the preprint: +// Nic Ezzell, Lev Barash, Itay Hen, Exact and universal quantum Monte Carlo estimators for energy susceptibility and fidelity susceptibility. 
+// +// This work augments the base code whose information is below. +// +// +// +// This program implements Permutation Matrix Representation Quantum Monte Carlo for arbitrary spin-1/2 Hamiltonians. +// +// This program is introduced in the paper: +// Lev Barash, Arman Babakhani, Itay Hen, A quantum Monte Carlo algorithm for arbitrary spin-1/2 Hamiltonians, Physical Review Research 6, 013281 (2024). +// +// This program is licensed under a Creative Commons Attribution 4.0 International License: +// http://creativecommons.org/licenses/by/4.0/ +// +// ExExFloat datatype and calculation of divided differences are described in the paper: +// L. Gupta, L. Barash, I. Hen, Calculating the divided differences of the exponential function by addition and removal of inputs, Computer Physics Communications 254, 107385 (2020) +// + +#include"mainqmc.hpp" + +double get_cpu_time(){ return (double)clock() / CLOCKS_PER_SEC;} + +int main(int argc, char* argv[]){ + if(steps < Nbins*stepsPerMeasurement){ + std::cout<<"Error: steps cannot be smaller than Nbins*stepsPerMeasurement."< +#include + +double start_time, elapsed_time; + +void compute(){ + start_time = MPI_Wtime(); + divdiff dd(q+4,500); d=ⅆ init(); + for(step=0;stepPMRQMC, as developed in: Lev Barash, Arman Babakhani, Itay Hen, A quantum Monte Carlo algorithm for arbitrary spin-1/2 Hamiltonians, Physical Review Research 6, 013281 (2024). + +# Instructions +Details on using PMR-QMC itself are contained in the aforementioned PMRQMC. To replicate the experiments in [preprint], simply run the python driver files contained in the "x_experiments/" folders. At minimum, you will need a version of python that contains `numpy`, but one containing also `scipy` allows one to generate exact results for comparison as well. The fidsus_driver takes 3 possible command line arguments... +- `python fidsus_experiments/zhang_prl_model_fidsus_driver.py 0` runs a simple test to ensure things are working properly for susceptibility simulations +- `python fidsus_experiments/zhang_prl_model_fidsus_driver.py 1` runs simulation to generate data for Figure 1 +- `python fidsus_experiments/zhang_prl_model_fidsus_driver.py 2` runs simulation to generate data for Figure 2c and 2d + +In each case, several temporary simulation files will be created in the base directory, and an associated data file (as a csv) will be placed in the `fidsus_experiments/` directory. + +Similarly, +- `python correlator_experiments/zhang_prl_model_correlator_driver.py 0` runs a simple test to ensure things are working properly for correlator simulations +- `python correlator_experiments/zhang_prl_model_correlator_driver.py 2` runs simulation to generate data for Figure 2a and 2b + +# Brief comments on code structure and function +- Code in the base directory is forked from PMRQMC. The code in `mainqmc.hpp` computes the susceptibility estimators and that in `correlator_mainqmc.hpp` computes the correlator estimators. (Technically, they both can compute either, but for technical reasons, it was easier to make separate files for each). +- `Utils/` contains new code written entirely for [preprint]. Each file has a brief description of its purpose at the top. 
+- Both `fidsus_experiments/` and `correlator_experiments/` just contain simulation drivers as explained above diff --git a/clean.sh b/clean.sh new file mode 100755 index 0000000..4ebe370 --- /dev/null +++ b/clean.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# +# This program is introduced in the paper: +# Lev Barash, Arman Babakhani, Itay Hen, A quantum Monte Carlo algorithm for arbitrary spin-1/2 Hamiltonians (2023). +# +# This program is licensed under a Creative Commons Attribution 4.0 International License: +# http://creativecommons.org/licenses/by/4.0/ +# +# +rm prepare.bin +rm PMRQMC.bin +rm H.txt +rm O.txt +rm hamiltonian.hpp +rm parameters.hpp +rm job_file.sh +rm temp_*.txt + diff --git a/correlator_PMRQMC.cpp b/correlator_PMRQMC.cpp new file mode 100644 index 0000000..2ba5e27 --- /dev/null +++ b/correlator_PMRQMC.cpp @@ -0,0 +1,71 @@ +// +// This program implements estimators for the titular quantities in the preprint: +// Nic Ezzell, Lev Barash, Itay Hen, Exact and universal quantum Monte Carlo estimators for energy susceptibility and fidelity susceptibility. +// +// This work augments the base code whose information is below. +// +// +// +// This program implements Permutation Matrix Representation Quantum Monte Carlo for arbitrary spin-1/2 Hamiltonians. +// +// This program is introduced in the paper: +// Lev Barash, Arman Babakhani, Itay Hen, A quantum Monte Carlo algorithm for arbitrary spin-1/2 Hamiltonians, Physical Review Research 6, 013281 (2024). +// +// This program is licensed under a Creative Commons Attribution 4.0 International License: +// http://creativecommons.org/licenses/by/4.0/ +// +// ExExFloat datatype and calculation of divided differences are described in the paper: +// L. Gupta, L. Barash, I. Hen, Calculating the divided differences of the exponential function by addition and removal of inputs, Computer Physics Communications 254, 107385 (2020) +// + +#include"correlator_mainqmc.hpp" + +double get_cpu_time(){ return (double)clock() / CLOCKS_PER_SEC;} + +int main(int argc, char* argv[]){ + if(steps < Nbins*stepsPerMeasurement){ + std::cout<<"Error: steps cannot be smaller than Nbins*stepsPerMeasurement."< 0: + u.set_as_random(l, eps, seed=seed) + uh0 = h0.conjugate(u) + uh1 = h1.conjugate(u) + uh = uh0 + lam * uh1 + + # ============================================== + # Running PMR QMC + # ============================================== + # save Hamiltonian file and O.txt file + with open("../H.txt", 'w') as f: + f.write(uh.to_pmr_str()) + with open("../O.txt", 'w') as f: + f.write(uh1.to_pmr_str()) + param_str = make_no_stand_param_fstr(seed, beta, tau, Tsteps, steps, stepsPerMeasurement) + with open("../parameters.hpp", "w") as f: + f.write(param_str) + # compile and run PMR-QMC + job_file = "job_file.sh" + temp_out_file = "temp_correlator_data_zhang_prl_rot_model.txt" + with open("../" + job_file, 'w') as fh: + fh.write("#!/bin/bash\n") + fh.write("g++ -O3 -std=c++11 -o prepare.bin prepare.cpp\n") + fh.write("./prepare.bin H.txt $(ls O.txt O.txt 2> /dev/null)\n") + #fh.write("./prepare.bin H.txt\n") + fh.write("g++ -O3 -std=c++11 -o PMRQMC.bin correlator_PMRQMC.cpp\n") + fh.write(f"./PMRQMC.bin > {temp_out_file}") + print(f"Trying to submit: {job_file}") + subprocess.run(f"cd ..; chmod +x {job_file}; ./{job_file}", shell=True) + + #subprocess.run(["bash", job_file]) + print("Success?") + # ============================================== + # Process and save output + # ============================================== + emergent, obs, std, time = 
parse_correlator_temp("../" + temp_out_file) + #----- form file ------ + fname = f"correlator_data_zhang_prl_rot_curve_{strnow}.csv" + # if file does not exit, open and add header + if not os.path.exists(fname): + with open(fname, "w") as f: + header1 = "n,tau,lam,beta,H1,H1_std,corr,corr_std," + header2 = "rng,eps,l,Tsteps,steps,stepsPerMeasurement," + header3 = "sign,sign_std,q,qmax,time" + f.write(header1 + header2 + header3) + # always append + with open(fname, "a+") as f: + # save plot param inputs + line1 = f"\n{n},{tau},{lam},{beta},{obs[0]},{std[0]}," + line2 = f"{obs[1]},{std[1]}," + line3 = f"{seed},{eps},{l},{Tsteps},{steps},{stepsPerMeasurement}," + line4 = f"{emergent[0]},{emergent[1]},{emergent[2]},{emergent[3]},{time}" + f.write(line1 + line2 + line3 + line4) + + return fname +# %% + +# %% +if __name__=="__main__": + # ==================================== + # Header part of main + # ==================================== + # arg parse which experiment to do (see below) + experiment = int(sys.argv[1]) + # get current date-time + now = datetime.datetime.now() + strnow = now.strftime("%Y-%m-%d_%H-%M-%S") + # get rng seeds + with open("../utils/rng_seeds.txt", "r") as f: + seeds = f.readline().split(',') + seeds = [int(s) for s in seeds] + # ==================================== + # Experiments one can run + # ==================================== + # ********************* + # example experiment + # ********************* + if experiment == 0: + n = 2 + rng = seeds[0] + tau = 0.0 + lam = 0.1 + beta = 0.25 + main(n, rng, tau, lam, beta, strnow) + # ********************* + # Fig 2 (a),(b) experiments + # ********************* + if experiment == 2: + n = 100 + beta = 20.0 + lam = 1.0 + high_sign_seeds = [3026438146, 2781301355, 4206712692, 2270171661, 2834120170, 1370102282, 2172515818, 185933287, 4109413259, 3629902865] + for rng in high_sign_seeds: + for tau in np.linspace(0, beta, 200): + main(n, rng, tau, lam, beta, strnow) + # %% diff --git a/correlator_mainqmc.hpp b/correlator_mainqmc.hpp new file mode 100644 index 0000000..12b87ee --- /dev/null +++ b/correlator_mainqmc.hpp @@ -0,0 +1,1107 @@ +// +// This program implements estimators for the titular quantities in the preprint: +// Nic Ezzell, Lev Barash, Itay Hen, Exact and universal quantum Monte Carlo estimators for energy susceptibility and fidelity susceptibility. +// +// This work augments the base code whose information is below. +// +// +// This program implements Permutation Matrix Representation Quantum Monte Carlo for arbitrary spin-1/2 Hamiltonians. +// +// This program is introduced in the paper: +// Lev Barash, Arman Babakhani, Itay Hen, A quantum Monte Carlo algorithm for arbitrary spin-1/2 Hamiltonians, Physical Review Research 6, 013281 (2024). +// +// This program is licensed under a Creative Commons Attribution 4.0 International License: +// http://creativecommons.org/licenses/by/4.0/ +// +// ExExFloat datatype and calculation of divided differences are described in the paper: +// L. Gupta, L. Barash, I. 
Hen, Calculating the divided differences of the exponential function by addition and removal of inputs, Computer Physics Communications 254, 107385 (2020) +// + +#include +#include +#include +#include +#include +#include +#include +#include"divdiff.hpp" +#include"hamiltonian.hpp" // use a header file, which defines the Hamiltonian and the custom observables +#include"parameters.hpp" // parameters of the simulation such as the number of Monte-Carlo updates + +#define measurements (steps/stepsPerMeasurement) + +#ifdef EXHAUSTIVE_CYCLE_SEARCH + #define rmin 0 // length r of sub-sequence is chosen randomly between rmin and rmax + #define rmax cycle_max_len + #define lmin r // cycle lengths must be between lmin and lmax + #define lmax cycle_max_len +#else + #define rmin (cycle_min_len-1)/2 + #define rmax (cycle_max_len+1)/2 + #define lmin 2*r-1 + #define lmax 2*r+1 +#endif + +static std::random_device rd; +static std::mt19937 rng; +static std::uniform_int_distribution<> dice2(0,1); +static std::uniform_int_distribution<> diceN(0,N-1); +static std::uniform_int_distribution<> diceNop(0,Nop-1); +static std::uniform_real_distribution<> val(0.0,1.0); +static std::geometric_distribution<> geometric_int(0.8); + +ExExFloat beta_pow_factorial[qmax]; // contains the values (-beta)^q / q! +ExExFloat beta_div2_pow_factorial[qmax]; // contains the values (-beta/2)^q / q! +ExExFloat tau_pow_factorial[qmax]; // contains the values (-tau)^q / q! +ExExFloat tau_minus_beta_pow_factorial[qmax]; // contains the values (tau-beta)^q / q! +double factorial[qmax]; // contains the values q! +int cycle_len[Ncycles]; +int cycles_used[Ncycles]; +int n_cycles[Nop+3]; // numbers of cycles of lengths 0,1,2,...,Nop+2, the last three values are always zeros. +int cycle_min_len, cycle_max_len, found_cycles, min_index, max_index; + +#ifndef MEASURE_CUSTOM_OBSERVABLES +#define Nobservables 0 +#endif + +const int N_all_observables = Nobservables + 15; +int valid_observable[N_all_observables]; + +int bin_length = measurements / Nbins; +double in_bin_sum[N_all_observables]; +double bin_mean[N_all_observables][Nbins]; +double in_bin_sum_sgn; +double bin_mean_sgn[Nbins]; + +int q; +int qmax_achieved=0; + +divdiff* d; // primary QMC divided difference of [-\beta Ez_0,...,-\beta Ez_q] +divdiff* dfs; // QMC divided difference for fidsus [(-\beta/2)Ez_0,...,(-\beta/2)Ez_q] +divdiff* ds1; // a scratch divided diff for measurements +divdiff* ds2; // a scratch divided diff for measurements + +std::bitset lattice; +std::bitset z; +std::bitset P; +std::bitset Pk, Pl; + +int Sq[qmax]; // list of q operator indices +int Sq_backup[qmax]; +int Sq_subseq[qmax]; +int Sq_gaps[qmax]; +double Energies[qmax+1]; +double Energies_backup[qmax+1]; +int eoccupied[qmax+1]; +double currEnergy; +std::complex old_currD, currD, currMDk, currMDl, currMD0; +std::complex currD_partial[qmax], currMDk_trace[qmax], currMDl_trace[qmax], currMD0_trace[qmax]; + +ExExFloat one, currWeight; +unsigned long long step; +unsigned long long measurement_step; + +double CalcEnergy(){ // calculate the energy of a given configuration of spins + std::complex sum = 0; + for(int i=0;i calc_d(int k){ // calculate d_k = for the current configuration of spins + std::complex sum = 0; + for(int i=0;iCurrentLength=0; GetEnergies(); + for(int i=0;i<=q;i++) d->AddElement(-beta*Energies[i]); + return d->divdiffs[q] * beta_pow_factorial[q] * currD.real(); +} + +ExExFloat UpdateWeight(){ + int i, j, notfound, n=d->CurrentLength; double value; + GetEnergies(); 
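+	// (descriptive comment, added for clarity) recompute the classical energies E_z for the updated configuration; the loop below keeps the leading divided-difference inputs (-beta*E_z) that still occur among the new energies and re-adds only the rest, rather than rebuilding d from scratch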
memset(eoccupied,0,(q+1)*sizeof(int)); + for(i=0;iz[i]; + for(j=0;j<=q;j++) if(eoccupied[j]==0 && value == -beta*Energies[j]){ notfound = 0; break; } + if(notfound) break; eoccupied[j] = 1; + } + if(i==0) d->CurrentLength=0; else while(n>i){ d->RemoveElement(); n--; } + j=0; while(i<=q){ while(eoccupied[j]) j++; d->AddElement(-beta*Energies[j++]); i++; } + return d->divdiffs[q] * beta_pow_factorial[q] * currD.real(); +} + +ExExFloat UpdateWeightReplace(double removeEnergy, double addEnergy){ + if(removeEnergy != addEnergy) if(d->RemoveValue(-beta*removeEnergy)) d->AddElement(-beta*addEnergy); else{ + std::cout << "Error: energy not found" << std::endl; exit(1); + } + return d->divdiffs[q] * beta_pow_factorial[q] * currD.real(); // use this value only when the values of q and currD are correct +} + +ExExFloat UpdateWeightDel(double removeEnergy1, double removeEnergy2){ + if(d->RemoveValue(-beta*removeEnergy1) && d->RemoveValue(-beta*removeEnergy2)) + return d->divdiffs[q] * beta_pow_factorial[q] * currD.real(); // use this value only when the values of q and currD are correct + else{ + std::cout << "Error: energy not found" << std::endl; exit(1); + } +} + +ExExFloat UpdateWeightIns(double addEnergy1, double addEnergy2){ + d->AddElement(-beta*addEnergy1); d->AddElement(-beta*addEnergy2); + return d->divdiffs[q] * beta_pow_factorial[q] * currD.real(); // use this value only when the values of q and currD are correct +} + +int NoRepetitionCheck(int* sequence, int r){ // check for absence of repetitions in a sequence of length r + int i,j,rep = 1; + for(i=0;ilmax) continue; + not_contained = 0; for(j=0;j0 ? found_cycle_list[int(val(rng)*found_cycles)] : -1; // returns one of the found cycles chosen randomly. +} + +//unsigned int rng_seed; + +void init(){ + int i; double curr2=1; ExExFloat curr1; beta_pow_factorial[0] = curr1; factorial[0] = curr2; + ExExFloat curr_tau; ExExFloat curr_bmt; ExExFloat curr_beta2; + for(q=1;q=0;i--) if(dice2(rng)) lattice.set(i); z = lattice; q=0; + currWeight = GetWeight(); + for(i=0;i0 ? 
val(rng) : 1; ExExFloat newWeight; + if(v < 0.3){ //local move + if(v<0.1 && q>=2){ // attempt to swap Sq[m] and Sq[m+1] + m = int(val(rng)*(q-1)); // m is between 0 and (q-2) + if(Sq[m]!=Sq[m+1]){ + oldE = Energies[m+1]; old_currD = currD; + p = Sq[m]; Sq[m] = Sq[m+1]; Sq[m+1] = p; GetEnergies(); newWeight = UpdateWeightReplace(oldE,Energies[m+1]); + if(val(rng) < Metropolis(newWeight)) currWeight = newWeight; else { + Sq[m+1] = Sq[m]; Sq[m] = p; currD = old_currD; + UpdateWeightReplace(Energies[m+1],oldE); Energies[m+1] = oldE; + } + } + } else if(v<0.2){ // attempt to delete Sq[m] and Sq[m+1] + if(q>=2){ + m = int(val(rng)*(q-1)); // m is between 0 and (q-2) + if(Sq[m]==Sq[m+1]){ + oldE = Energies[m]; oldE2 = Energies[m+1]; old_currD = currD; + memcpy(Sq_backup,Sq,q*sizeof(int)); memcpy(Energies_backup,Energies,(q+1)*sizeof(double)); + for(i=m;i=m;i--) Sq[i+2] = Sq[i]; q+=2; Sq[m] = Sq[m+1] = p; + GetEnergies(); newWeight = UpdateWeightIns(Energies[m],Energies[m+1]); + if(val(rng) < Metropolis(newWeight)) currWeight = newWeight; else{ + q-=2; memcpy(Sq,Sq_backup,q*sizeof(int)); memcpy(Energies,Energies_backup,(q+1)*sizeof(double)); + currD = old_currD; d->RemoveElement(); d->RemoveElement(); + } + } else qmax_achieved = 1; + } else if(v < 0.5){ // attempting a fundamental cycle completion + int oldq, j = 0, inv_pr; double wfactor; + u = geometric_int(rng); // a random integer u is picked according to geometric distribution + if(q >= u+rmin){ + inv_pr = min(rmax,q-u)-(rmin)+1; + r = int(val(rng)*inv_pr) + (rmin); // r is random integer between rmin and min(rmax,q-u) + PickSubsequence(r+u); // indexes of the subsequence are min_index, min_index+1,..., max_index=min_index+r+u-1 + std::shuffle(Sq_subseq,Sq_subseq+r+u,rng); + if(NoRepetitionCheck(Sq_subseq,r)){ + for(i=0;i 0){ // cycles[m] is one of the found cycles, containing all the operators of Sq_subseq + P = cycles[m]; for(i=0;imax_index;i--) Sq[i+p-r] = Sq[i]; // shift the values to the right + else if(r>p) for(i=max_index+1;i=2){ // attempting a block swap + m = q==2 ? 
0 : int(val(rng)*(q-1)); // m is between 0 and (q-2) + oldE = currEnergy; for(i=0;i<=m;i++) ApplyOperator(Sq[i]); + for(i=0;i<=m;i++) Sq_backup[q-1-m+i] = Sq[i]; + for(i=m+1;i calc_MD0(int n){ // calculate for the current configuration of spins and observable n + std::complex sum = 0; + for(int i=0;i og_lattice = lattice; + currMD0 = currMD0_trace[0] = calc_MD0(n); + for(int i=0;i calc_MD(int n, int k){ // calculate d_k = for the current configuration of spins and observable n + std::complex sum = 0; + for(int i=0;i og_lattice = lattice; + currMDk = currMDk_trace[0] = 1; + for(int i=0;iz[q]/(-beta); //z[q] = (-beta) * E_{z_q} + if(q > 0) R += (d->divdiffs[q-1]/d->divdiffs[q]).get_double()*q/(-beta); + return R; +} + +double measure_H2(){ + double R = (d->z[q]/(-beta))*(d->z[q]/(-beta)); + if(q>0) R += (d->z[q]/(-beta) + d->z[q-1]/(-beta))*(d->divdiffs[q-1]/d->divdiffs[q]).get_double()*q/(-beta); + if(q>1) R += (d->divdiffs[q-2]/d->divdiffs[q]).get_double()*(q*(q-1))/(-beta)/(-beta); + return R; +} + +double measure_Hdiag(){ + return currEnergy; +} + +double measure_Hdiag2(){ + return currEnergy*currEnergy; +} + +double measure_Hoffdiag(){ + double R = 0; + if(q > 0) R += (d->divdiffs[q-1]/d->divdiffs[q]).get_double()*q/(-beta); + return R; +} + +double measure_Hoffdiag2(){ + double R = (d->z[q]/(-beta))*(d->z[q]/(-beta)) + currEnergy*(currEnergy - 2*measure_H()); + if(q>0) R += (d->z[q]/(-beta) + d->z[q-1]/(-beta))*(d->divdiffs[q-1]/d->divdiffs[q]).get_double()*q/(-beta); + if(q>1) R += (d->divdiffs[q-2]/d->divdiffs[q]).get_double()*(q*(q-1))/(-beta)/(-beta); + return R; +} + +double measure_Hdiag_corr(){ + double R = (d->z[0]/(-beta))/(d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + ds1->CurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + // init ds2 with full divided difference + double tot_num = 0; + for(int j = q; j >= 0; j--) ds2->AddElement((tau - beta)*(d->z[j]/(-beta))); + for(int j = 0; j <= q; j++) { + ds1->AddElement(-tau*(d->z[j]/(-beta))); + tot_num += (d->z[j]/(-beta))*((ds1->divdiffs[j]*tau_pow_factorial[j])*(ds2->divdiffs[q-j]*tau_minus_beta_pow_factorial[q-j])).get_double(); + ds2->RemoveElement(); + } + R *= tot_num; + return R; +} + +double measure_Hoffdiag_corr(){ + double R = measure_H2(); + R += measure_Hdiag_corr(); + R -= 2.0 * (d->z[0]/(-beta)) * measure_H(); + return R; +} + +double measure_Hdiag_int1(){ + double R = -(d->z[0]/(-beta))/(d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + double tot_numerator = 0; + for(int j = 0; j <= q; j++) { + d->AddElement(-beta*(d->z[j]/(-beta))); + tot_numerator += (d->z[j]/(-beta))*(d->divdiffs[q+1]*beta_pow_factorial[q+1]).get_double(); + d->RemoveElement(); + } + R *= tot_numerator; + return R; +} + +double measure_Hoffdiag_int1(){ + double R = beta * measure_H2(); + R += measure_Hdiag_int1(); + R -= 2.0 * beta * (d->z[0]/(-beta)) * measure_H(); + return R; +} + +double measure_Hdiag_int2(){ + double R = 0.0; + double prefac = -(d->z[0]/(-beta))/(d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + double Ezj, Ezu; + for(int j = 0; j <= q; j++) { + Ezj = d->z[j]/(-beta); + d->AddElement(-beta*Ezj); + R += prefac*Ezj*beta*(d->divdiffs[q+1]*beta_pow_factorial[q+1]).get_double(); + d->AddElement(0); + R += prefac*Ezj*(j+1.0)*(d->divdiffs[q+2]*beta_pow_factorial[q+2]).get_double(); + for(int u = 0; u <= j; u++){ + Ezu = d->z[u]/(-beta); + d->AddElement(-beta*Ezu); + R += prefac*Ezj*Ezu*(d->divdiffs[q+3]*beta_pow_factorial[q+3]).get_double(); + d->RemoveElement(); + } + d->RemoveElement(); + 
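+		// (descriptive comment, added for clarity) the RemoveElement above pops the appended 0; the one below pops -beta*E_{z_j} added at the top of this j iteration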
d->RemoveElement(); + } + return R; +} + +double measure_Hoffdiag_int2(){ + double R = (beta * beta * measure_H2()) / 2.0; + R += measure_Hdiag_int2(); + R -= beta * beta * (d->z[0]/(-beta)) * measure_H(); + return R; +} + +double measure_Hdiag_int3(){ + double R = -(d->z[0]/(-beta))/(d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + double tot_numerator = 0; + double factor = 0; + for(int j = 0; j <= q; j++) { + // reset and init divdiffs + ds1->CurrentLength=0; ds2->CurrentLength=0; + for(int k = q; k >= j; k--) ds2->AddElement((-beta/2)*(d->z[k]/(-beta))); + ds2->AddElement((-beta/2)*(d->z[j]/(-beta))); + for(int k = j-1; k >= 0; k--) ds2->AddElement((-beta/2)*(d->z[k]/(-beta))); + // sum over r to accumulate contribution for fixed j + for(int r = 0; r <= j; r++) { + ds1->AddElement((-beta/2)*(d->z[r]/(-beta))); + factor = (d->z[j]/(-beta))*(ds1->divdiffs[r]*beta_div2_pow_factorial[r]).get_double(); + tot_numerator += (factor*(beta/2)*(ds2->divdiffs[q+1-r]*beta_div2_pow_factorial[q+1-r]).get_double()); + ds2->AddElement(0); + tot_numerator += (factor*(j-r+1)*(ds2->divdiffs[q+2-r]*beta_div2_pow_factorial[q+2-r]).get_double()); + for(int u = r; u <= j; u++){ + ds2->AddElement((-beta/2)*(d->z[u]/(-beta))); + tot_numerator += (factor*(d->z[u]/(-beta))*(ds2->divdiffs[q+3-r]*beta_div2_pow_factorial[q+3-r]).get_double()); + ds2->RemoveElement(); + } + ds2->RemoveElement(); + ds2->RemoveElement(); + } + } + R *= tot_numerator; + return R; +} + +double measure_Hoffdiag_int3(){ + double R = (beta * beta * measure_H2()) / 8.0; + R += measure_Hdiag_int3(); + R -= (beta * beta * (d->z[0]/(-beta)) * measure_H()) / 4.0; + return R; +} + +double measure_Z_magnetization(){ + return (2.0*lattice.count() - N)/N; +} + +std::string name_of_observable(int n){ + std::string s; + if(n < Nobservables){ +#ifdef MEASURE_CUSTOM_OBSERVABLES + s = Mnames[n]; +#endif + } else switch(n-Nobservables){ + case 0: s = "H"; break; + case 1: s = "H^2"; break; + case 2: s = "H_{diag}"; break; + case 3: s = "H_{diag}^2"; break; + case 4: s = "H_{offdiag}"; break; + case 5: s = "H_{offdiag}^2"; break; + case 6: s = "Z_magnetization"; break; + case 7: s = "measure_Hdiag_corr"; break; + case 8: s = "measure_Hdiag_int1"; break; + case 9: s = "measure_Hdiag_int2"; break; + case 10: s = "measure_Hdiag_int3"; break; + case 11: s = "measure_Hoffdiag_corr"; break; + case 12: s = "measure_Hoffdiag_int1"; break; + case 13: s = "measure_Hoffdiag_int2"; break; + case 14: s = "measure_Hoffdiag_int3"; break; + } + return s; +} + +double measure_observable(int n){ + double R = 0; + if(valid_observable[n]) if(n < Nobservables){ +#ifdef MEASURE_CUSTOM_OBSERVABLES + int i,k,len,cont,l,lenk,lenl,j,t,r; + std::complex T = 0; + std::complex A0z = calc_MD0(n); + std::complex Akz = 0.0; + std::complex prefac = 0.0; + std::complex inner_prefac = 0.0; + switch(n){ + // measures + case 0: + T += A0z; // add contribution of diagonal of custom O + for(k=0;kq) continue; + // cannot have repeats in Sq[q-k+1],Sq[q-k+2],...,Sq[q] or cannot be making marked P + // because indexing from 0, actually checking Sq[q-k],Sq[q-k+1],...,Sq[q-1] + if(!NoRepetitionCheck(Sq+(q-len),len)) continue; + // Sq[q-k] * Sq[q-k+1] * ... * Sq[q-1] == P (i.e. 
each Sq[q-1-i] is in P is what test is doing) + cont = 0; for(i=0;idivdiffs[q-len]/d->divdiffs[q]).get_double() * + (beta_pow_factorial[q-len]/beta_pow_factorial[q]).get_double()/factorial[len] * + (currD_partial[q-len]/currD) * calc_MD(n,k); // calc_MD(n,k) gives \tilde{A}(z) + // currD_parital[q-len]/currD is be ratio in eq.32 + } + R = (currD*T).real()/currD.real(); // we importance-sample Re(W_C A_C)/Re(W_C) + break; + // measures + case 4: + T += A0z*A0z; // diag/diag component + // compute (k=0, l) contribution + for(l=0;lq) continue; // need lenl > q + if(!NoRepetitionCheck(Sq+(q-lenl),lenl)) continue; // check {Sq[q-lenl], ..., Sq[q-1]} no repeats + // checks that Sq[q-1-i] \in P_l for i=0, i=lenl-1. + cont = 0; for(i=0;idivdiffs[q-lenl]/d->divdiffs[q]).get_double() * + (beta_pow_factorial[q-lenl]/beta_pow_factorial[q]).get_double()/(factorial[lenl]) * + (currD_partial[q-lenl]/currD); + } + for(k=0;kq) continue; + if(!NoRepetitionCheck(Sq+(q-lenk),lenk)) continue; + cont = 0; for(i=0;idivdiffs[q-lenk]/d->divdiffs[q]).get_double() * + (beta_pow_factorial[q-lenk]/beta_pow_factorial[q]).get_double()/factorial[lenk] * + (currD_partial[q-lenk]/currD); + // compute (k, l) contributions + for(l=0;lq) continue; // need lenk + lenl > q + if(!NoRepetitionCheck(Sq+(q-lenk-lenl),lenl)) continue; // check {Sq[q-lenk-lenl], ..., Sq[q-1-lenk]} no repeats + // checks that Sq[q-1-lenk-i] \in P_l for i=0, i=lenl-1. + cont = 0; for(i=0;idivdiffs[q-lenk-lenl]/d->divdiffs[q]).get_double() * + (beta_pow_factorial[q-lenk-lenl]/beta_pow_factorial[q]).get_double()/(factorial[lenk]*factorial[lenl]) * + (currD_partial[q-lenk-lenl]/currD); + } + } + R = (currD*T).real()/currD.real(); + break; + // measures + case 1: + // add (k=0, l=0) pure diagonal contribution with Leibniz rule (like Hdiag_corr) + calc_MD0_trace(n); + prefac = A0z / (d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + ds1->CurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + for(int j = q; j >= 0; j--) ds2->AddElement((tau - beta)*(d->z[j]/(-beta))); // init ds2 + for(int j = 0; j <= q; j++) { + ds1->AddElement(-tau*(d->z[j]/(-beta))); + T += prefac*currMD0_trace[j]*((ds1->divdiffs[j]*tau_pow_factorial[j])*(ds2->divdiffs[q-j]*tau_minus_beta_pow_factorial[q-j])).get_double(); + ds2->RemoveElement(); + } + // add (k=0, l) cross term contribution + prefac = A0z / ((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + ds1->CurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + for(int j = q; j >= lenl; j--) ds2->AddElement((-tau)*(d->z[j]/(-beta))); // init ds2 + ds2->AddElement(0); // add dummy value for below continue logic to hold + for (int j = lenl; j <= q; j++){ + ds2->RemoveElement(); + ds1->AddElement((-beta+tau)*(d->z[j-lenl]/(-beta))); + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;idivdiffs[j-lenl]*tau_minus_beta_pow_factorial[j-lenl]).get_double() * + (ds2->divdiffs[q-j]*tau_pow_factorial[q-j]).get_double()/(factorial[lenl]) * + (currD_partial[j-lenl]/currD_partial[j]); + } + } + // add (k, l=0) and (k, l) contributions + for(k=0;kq) continue; + if(!NoRepetitionCheck(Sq+(q-lenk),lenk)) continue; + cont = 0; for(i=0;idivdiffs[q]*beta_pow_factorial[q]).get_double(); + ds1->CurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + for(int j = q-lenk; j >= 0; j--) ds2->AddElement((-tau)*(d->z[j]/(-beta))); // init ds2 + for(int j = 0; j <= q-lenk; j++){ + 
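+					// (descriptive comment, added for clarity) Leibniz split at j: ds1 grows with the (tau-beta)-weighted inputs while ds2, built above from the -tau-weighted inputs, is shortened by one element after each term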
ds1->AddElement((-beta+tau)*(d->z[j]/(-beta))); + T += prefac*currMD0_trace[j]*(ds1->divdiffs[j]*tau_minus_beta_pow_factorial[j]).get_double() * + (ds2->divdiffs[q-lenk-j]*tau_pow_factorial[q-lenk-j]).get_double()/(factorial[lenk]) * + (currD_partial[q-lenk]/currD); + ds2->RemoveElement(); + } + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + // compute (k, l) Lebniz rule contribution + std::complex prefac = Akz / ((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + ds1->CurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + for(int j = q-lenk; j >= lenl; j--) ds2->AddElement((-tau)*(d->z[j]/(-beta))); // init ds2 + ds2->AddElement(0); + for (int j = lenl; j <= q-lenk; j++){ + ds2->RemoveElement(); + ds1->AddElement((-beta+tau)*(d->z[j-lenl]/(-beta))); + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;idivdiffs[j-lenl]*tau_minus_beta_pow_factorial[j-lenl]).get_double() * + (ds2->divdiffs[q-lenk-j]*tau_pow_factorial[q-lenk-j]).get_double()/(factorial[lenk]*factorial[lenl]) * + (currD_partial[j-lenl]/currD) * (currD_partial[q-lenk]/currD_partial[j]); + } + } + + } + R = (currD*T).real()/currD.real(); + break; + // measures \int_0^\beta \dtau + case 5: + // add (k=0, l=0) pure diagonal contribution with Leibniz rule (like Hdiag_int1) + calc_MD0_trace(n); + prefac = -A0z / (d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + for(int j = 0; j <= q; j++) { + d->AddElement(-beta*(d->z[j]/(-beta))); + T += prefac*currMD0_trace[j]*(d->divdiffs[q+1]*beta_pow_factorial[q+1]).get_double(); + d->RemoveElement(); + } + // add (k=0, l) cross term contribution + prefac = -A0z / ((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + ds1->CurrentLength=0; // reset scratch divdiffs + for(int j = q; j >= lenl; j--) ds1->AddElement((-beta)*(d->z[j]/(-beta))); // init ds1 + ds1->AddElement(0); // add dummy value for continue logic + for(int j = lenl; j <= q; j++){ + ds1->RemoveElement(); // remove before check so things work with continue + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;iAddElement((-beta)*(d->z[i]/(-beta))); + // add contribution if relevant + T += prefac*currMDl_trace[j]*(ds1->divdiffs[q-lenl+1]*beta_pow_factorial[q-lenl+1]).get_double() * + (currD_partial[j-lenl]/currD_partial[j])/(factorial[lenl]); + // remove \Lambda_j elements from \Theta_j multiset + for(int i = 0; i <= j-lenl; i++) ds1->RemoveElement(); + ds1->RemoveElement(); + } + } + // add (k, l=0) and (k, l) contributions + for(k=0;kq) continue; + if(!NoRepetitionCheck(Sq+(q-lenk),lenk)) continue; + cont = 0; for(i=0;idivdiffs[q]*beta_pow_factorial[q]).get_double(); + ds1->CurrentLength=0; // reset scratch divdiffs + for(int j = q-lenk; j >= 0; j--) ds1->AddElement((-beta)*(d->z[j]/(-beta))); // init ds1 + for(int j = 0; j <= q-lenk; j++){ + // add \Lambda_j elements to \Theta_j multiset + for(int i = 0; i <= j; i++) ds1->AddElement((-beta)*(d->z[i]/(-beta))); + T += prefac*currMD0_trace[j]*(ds1->divdiffs[q-lenk+1]*beta_pow_factorial[q-lenk+1]).get_double() * + (currD_partial[q-lenk]/currD)/(factorial[lenk]); + // remove \Lambda_j elements from \Theta_j multiset + for(int i = 0; i <= j; i++) ds1->RemoveElement(); + ds1->RemoveElement(); + } + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + // compute (k, l) Lebniz rule contribution + prefac = -Akz / 
((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + ds1->CurrentLength=0; // reset scratch divdiffs + for(int j = q-lenk; j >= lenl; j--) ds1->AddElement((-beta)*(d->z[j]/(-beta))); // init ds1 + ds1->AddElement(0); // add dummy value for continue logic + for (int j = lenl; j <= q-lenk; j++){ + ds1->RemoveElement(); // remove before check so things work with continue + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;iAddElement((-beta)*(d->z[i]/(-beta))); + // add contribution if relevant + T += prefac*currMDl_trace[j]*(ds1->divdiffs[q-lenk-lenl+1]*beta_pow_factorial[q-lenk-lenl+1]).get_double() * + (currD_partial[j-lenl]/currD) * (currD_partial[q-lenk]/currD_partial[j])/(factorial[lenk]*factorial[lenl]); + // remove \Lambda_j elements from \Theta_j multiset + for(int i = 0; i <= j-lenl; i++) ds1->RemoveElement(); + } + } + + } + R = (currD*T).real()/currD.real(); + break; + // measures \int_0^\beta \dtau \tau + case 2: + // add (k=0, l=0) pure diagonal contribution with Leibniz rule (like Hdiag_int2) + calc_MD0_trace(n); + prefac = -A0z / (d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + for(int j = 0; j <= q; j++) { + d->AddElement(-beta*(d->z[j]/(-beta))); + T += prefac*currMD0_trace[j]*beta*(d->divdiffs[q+1]*beta_pow_factorial[q+1]).get_double(); + d->AddElement(0); + T += prefac*currMD0_trace[j]*(j+1.0)*(d->divdiffs[q+2]*beta_pow_factorial[q+2]).get_double(); + for(int u = 0; u <= j; u++){ + d->AddElement(-beta*(d->z[u]/(-beta))); + T += prefac*currMD0_trace[j]*(d->z[u]/(-beta))*(d->divdiffs[q+3]*beta_pow_factorial[q+3]).get_double(); + d->RemoveElement(); + } + d->RemoveElement(); + d->RemoveElement(); + } + // add (k=0, l) cross term contribution + prefac = -A0z / ((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + ds1->CurrentLength=0; // reset scratch divdiffs + for(int j = q; j >= lenl; j--) ds1->AddElement((-beta)*(d->z[j]/(-beta))); // init ds1 + ds1->AddElement(0); // add dummy value for continue logic + for(int j = lenl; j <= q; j++){ + ds1->RemoveElement(); // remove before check so things work with continue + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;iAddElement((-beta)*(d->z[i]/(-beta))); + // add contribution if relevant + T += prefac*currMDl_trace[j]*beta*(ds1->divdiffs[q-lenl+1]*beta_pow_factorial[q-lenl+1]).get_double() * + (currD_partial[j-lenl]/currD_partial[j])/(factorial[lenl]); + ds1->AddElement(0); + T += prefac*currMDl_trace[j]*(j-lenl+1.0)*(ds1->divdiffs[q-lenl+2]*beta_pow_factorial[q-lenl+2]).get_double() * + (currD_partial[j-lenl]/currD_partial[j])/(factorial[lenl]); + for(int u = 0; u <= j-lenl; u++){ + ds1->AddElement(-beta*(d->z[u]/(-beta))); + T += prefac*currMDl_trace[j]*(d->z[u]/(-beta))*(ds1->divdiffs[q-lenl+3]*beta_pow_factorial[q-lenl+3]).get_double() * + (currD_partial[j-lenl]/currD_partial[j])/(factorial[lenl]); + ds1->RemoveElement(); + } + // remove \Lambda_j elements from \Theta_j multiset + for(int i = 0; i <= j-lenl; i++) ds1->RemoveElement(); + ds1->RemoveElement(); + } + } + // add (k, l=0) and (k, l) contributions + for(k=0;kq) continue; + if(!NoRepetitionCheck(Sq+(q-lenk),lenk)) continue; + cont = 0; for(i=0;idivdiffs[q]*beta_pow_factorial[q]).get_double(); + ds1->CurrentLength=0; // reset scratch divdiffs + for(int j = q-lenk; j >= 0; j--) ds1->AddElement((-beta)*(d->z[j]/(-beta))); // init ds1 + for(int j = 
0; j <= q-lenk; j++){ + // add \Lambda_j elements to \Theta_j multiset + for(int i = 0; i <= j; i++) ds1->AddElement((-beta)*(d->z[i]/(-beta))); + T += prefac*currMD0_trace[j]*beta*(ds1->divdiffs[q-lenk+1]*beta_pow_factorial[q-lenk+1]).get_double() * + (currD_partial[q-lenk]/currD)/(factorial[lenk]); + ds1->AddElement(0); + T += prefac*currMD0_trace[j]*(j+1.0)*(ds1->divdiffs[q-lenk+2]*beta_pow_factorial[q-lenk+2]).get_double() * + (currD_partial[q-lenk]/currD)/(factorial[lenk]); + for(int u = 0; u <= j; u++){ + ds1->AddElement(-beta*(d->z[u]/(-beta))); + T += prefac*currMD0_trace[j]*(d->z[u]/(-beta))*(ds1->divdiffs[q-lenk+3]*beta_pow_factorial[q-lenk+3]).get_double() * + (currD_partial[q-lenk]/currD)/(factorial[lenk]); + ds1->RemoveElement(); + } + // remove \Lambda_j elements from \Theta_j multiset + ds1->RemoveElement(); // Remove 0 + for(int i = 0; i <= j; i++) ds1->RemoveElement(); + ds1->RemoveElement(); // Remove from e{-tau[]} multiset + } + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + // compute (k, l) Lebniz rule contribution + prefac = -Akz / ((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + ds1->CurrentLength=0; // reset scratch divdiffs + for(int j = q-lenk; j >= lenl; j--) ds1->AddElement((-beta)*(d->z[j]/(-beta))); // init ds1 + ds1->AddElement(0); // add dummy value for continue logic + for (int j = lenl; j <= q-lenk; j++){ + ds1->RemoveElement(); // remove before check so things work with continue + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;iAddElement((-beta)*(d->z[i]/(-beta))); + // add contribution if relevant + T += prefac*currMDl_trace[j]*beta*(ds1->divdiffs[q-lenk-lenl+1]*beta_pow_factorial[q-lenk-lenl+1]).get_double() * + (currD_partial[j-lenl]/currD) * (currD_partial[q-lenk]/currD_partial[j])/(factorial[lenk]*factorial[lenl]); + ds1->AddElement(0); + T += prefac*currMDl_trace[j]*(j-lenl+1.0)*(ds1->divdiffs[q-lenk-lenl+2]*beta_pow_factorial[q-lenk-lenl+2]).get_double() * + (currD_partial[j-lenl]/currD) * (currD_partial[q-lenk]/currD_partial[j])/(factorial[lenk]*factorial[lenl]); + for(int u = 0; u <= j-lenl; u++){ + ds1->AddElement(-beta*(d->z[u]/(-beta))); + T += prefac*currMDl_trace[j]*(d->z[u]/(-beta))*(ds1->divdiffs[q-lenk-lenl+3]*beta_pow_factorial[q-lenk-lenl+3]).get_double() * + (currD_partial[j-lenl]/currD) * (currD_partial[q-lenk]/currD_partial[j])/(factorial[lenk]*factorial[lenl]); + ds1->RemoveElement(); + } + ds1->RemoveElement(); // remove 0 + // remove \Lambda_j elements from \Theta_j multiset + for(int i = 0; i <= j-lenl; i++) ds1->RemoveElement(); + } + } + + } + R = (currD*T).real()/currD.real(); + break; + // measures \int_0^{\beta/2} \dtau \tau + case 3: + // add (k=0, l=0) pure diagonal contribution with Leibniz rule (like Hdiag_int3) + calc_MD0_trace(n); + prefac = -A0z / (d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + for(int j = 0; j <= q; j++) { + // reset and init divdiffs + ds1->CurrentLength=0; ds2->CurrentLength=0; + for(int i = q; i >= j; i--) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + for(int i = j; i >= 0; i--) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + // sum over r to accumulate contribution for fixed j + for(int r = 0; r <= j; r++) { + ds1->AddElement((-beta/2.0)*(d->z[r]/(-beta))); + inner_prefac = prefac * currMD0_trace[j] * (ds1->divdiffs[r]*beta_div2_pow_factorial[r]).get_double(); + T += (beta/2.0)*inner_prefac*(ds2->divdiffs[q+1-r]*beta_div2_pow_factorial[q+1-r]).get_double(); + ds2->AddElement(0); + T += 
(inner_prefac*(j-r+1.0)*(ds2->divdiffs[q+2-r]*beta_div2_pow_factorial[q+2-r]).get_double()); + for(int u = r; u <= j; u++){ + ds2->AddElement((-beta/2.0)*(d->z[u]/(-beta))); + T += (d->z[u]/(-beta))*inner_prefac*(ds2->divdiffs[q+3-r]*beta_div2_pow_factorial[q+3-r]).get_double(); + ds2->RemoveElement(); + } + ds2->RemoveElement(); + ds2->RemoveElement(); + } + } + // add (k=0, l) cross term contribution + prefac = -A0z / ((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + for (int j = lenl; j <= q; j++){ + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;iCurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + // build e^{-\beta/2 [\Lambda_j]} + for(int i = j; i <= q; i++) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + // build e^{-\beta/2 [\Omega_{j,r}]} (aka add [\overline{\Theta_{j,r}}] to [\Lambda_j] + for(int i = j-lenl; i>=0; i--) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + // sum over r + for(int r = 0; r <= j-lenl; r++){ + // build e^{-\beta/2 [\Theta_{j,r}]} + //for(int i = 0; i <= r; i++) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + ds1->AddElement((-beta/2)*(d->z[r]/(-beta))); + inner_prefac = prefac*(currMDl_trace[j]/factorial[lenl])*(currD_partial[j-lenl]/currD_partial[j])*(ds1->divdiffs[r]*beta_div2_pow_factorial[r]).get_double(); + // add (beta/2)*e^{-beta/2[\Omega_{j,r}]} contribution + T += inner_prefac*(beta/2)*(ds2->divdiffs[q+1-lenl-r]*beta_div2_pow_factorial[q+1-lenl-r]).get_double(); + // add N([\overline{\theta}_{j,r}]) e^{-beta/2[\Omega_{j,r}] + {0}} contribution + ds2->AddElement(0); + T += inner_prefac*(j-lenl-r+1.0)*(ds2->divdiffs[q+2-lenl-r]*beta_div2_pow_factorial[q+2-lenl-r]).get_double(); + // add \sum_u contribution + for(int u = r; u<= j-lenl; u++){ + ds2->AddElement((-beta/2)*(d->z[u]/(-beta))); + T += inner_prefac*(d->z[u]/(-beta))*(ds2->divdiffs[q+3-lenl-r]*beta_div2_pow_factorial[q+3-lenl-r]).get_double(); + ds2->RemoveElement(); + } + ds2->RemoveElement(); // removes AddElement(0) + ds2->RemoveElement(); // removes AddElement((-beta/2)*(d->z[r]/(-beta))); + } + } + } + // add (k, l=0) and (k, l) contributions + for(k=0;kq) continue; + if(!NoRepetitionCheck(Sq+(q-lenk),lenk)) continue; + cont = 0; for(i=0;idivdiffs[q]*beta_pow_factorial[q]).get_double()); + for (int j = 0; j <= q-lenk; j++){ + ds1->CurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + // build e^{-\beta/2 [\Lambda_j]} + for(int i = j; i <= q-lenk; i++) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + // build e^{-\beta/2 [\Omega_{j,r}]} (aka add [\overline{\Theta_{j,r}}] to [\Lambda_j] + for(int i = j; i>=0; i--) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + // sum over r + for(int r = 0; r <= j; r++){ + // build e^{-\beta/2 [\Theta_{j,r}]} + //for(int i = 0; i <= r; i++) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + ds1->AddElement((-beta/2)*(d->z[r]/(-beta))); + inner_prefac = prefac*currMD0_trace[j]*(currD_partial[q-lenk]/currD)*(ds1->divdiffs[r]*beta_div2_pow_factorial[r]).get_double(); + // add (beta/2)*e^{-beta/2[\Omega_{j,r}]} contribution + T += inner_prefac*(beta/2)*(ds2->divdiffs[q+1-lenk-r]*beta_div2_pow_factorial[q+1-lenk-r]).get_double(); + // add N([\overline{\theta}_{j,r}]) e^{-beta/2[\Omega_{j,r}] + {0}} contribution + ds2->AddElement(0); + T += inner_prefac*(j-r+1.0)*(ds2->divdiffs[q+2-lenk-r]*beta_div2_pow_factorial[q+2-lenk-r]).get_double(); + // add \sum_u contribution + for(int u = r; u<= j; u++){ + 
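+							// (descriptive comment, added for clarity) temporarily append (-beta/2)*E_{z_u}: the enlarged divided difference carries the u-th term, which is accumulated into T before the input is popped again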
ds2->AddElement((-beta/2)*(d->z[u]/(-beta))); + T += inner_prefac*(d->z[u]/(-beta))*(ds2->divdiffs[q+3-lenk-r]*beta_div2_pow_factorial[q+3-lenk-r]).get_double(); + ds2->RemoveElement(); + } + ds2->RemoveElement(); // removes AddElement(0) + ds2->RemoveElement(); // removes AddElement((-beta/2)*(d->z[r]/(-beta))); + } + } + // compute \sum_l (k, l) Leibniz rule contribution + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + prefac = -(Akz / factorial[lenk]) / ((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + for (int j = lenl; j <= q-lenk; j++){ + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;iCurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + // build e^{-\beta/2 [\Lambda_j]} + for(int i = j; i <= q-lenk; i++) ds2->AddElement((-beta/2.0)*(d->z[i]/(-beta))); + // build e^{-\beta/2 [\Omega_{j,r}]} (aka add [\overline{\Theta_{j,r}}] to [\Lambda_j] + for(int i = j-lenl; i>=0; i--) ds2->AddElement((-beta/2.0)*(d->z[i]/(-beta))); + // sum over r + for(int r = 0; r <= j-lenl; r++){ + // build e^{-\beta/2 [\Theta_{j,r}]} + //for(int i = 0; i <= r; i++) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + ds1->AddElement((-beta/2.0)*(d->z[r]/(-beta))); + inner_prefac = prefac*(currMDl_trace[j]/factorial[lenl])*(currD_partial[j-lenl]/currD)*(currD_partial[q-lenk]/currD_partial[j]) * + (ds1->divdiffs[r]*beta_div2_pow_factorial[r]).get_double(); + // add (beta/2)*e^{-beta/2[\Omega_{j,r}]} contribution + T += inner_prefac*(beta/2.0)*(ds2->divdiffs[q+1-lenl-lenk-r]*beta_div2_pow_factorial[q+1-lenl-lenk-r]).get_double(); + // add N([\overline{\theta}_{j,r}]) e^{-beta/2[\Omega_{j,r}] + {0}} contribution + ds2->AddElement(0); + T += inner_prefac*(j-lenl-r+1.0)*(ds2->divdiffs[q+2-lenl-lenk-r]*beta_div2_pow_factorial[q+2-lenl-lenk-r]).get_double(); + // add \sum_u contribution + for(int u = r; u<= j-lenl; u++){ + ds2->AddElement((-beta/2.0)*(d->z[u]/(-beta))); + T += inner_prefac*(d->z[u]/(-beta))*(ds2->divdiffs[q+3-lenl-lenk-r]*beta_div2_pow_factorial[q+3-lenl-lenk-r]).get_double(); + ds2->RemoveElement(); + } + ds2->RemoveElement(); // removes AddElement(0) + ds2->RemoveElement(); // removes AddElement((-beta/2)*(d->z[r]/(-beta))); + } + } + } + + } + R = (currD*T).real()/currD.real(); + break; + } +#endif + } else switch(n-Nobservables){ + case 0: R = measure_H(); break; + case 1: R = measure_H2(); break; + case 2: R = measure_Hdiag(); break; + case 3: R = measure_Hdiag2(); break; + case 4: R = measure_Hoffdiag(); break; + case 5: R = measure_Hoffdiag2(); break; + case 6: R = measure_Z_magnetization(); break; + case 7: R = measure_Hdiag_corr(); break; + case 8: R = measure_Hdiag_int1(); break; + case 9: R = measure_Hdiag_int2(); break; + case 10: R = measure_Hdiag_int3(); break; + case 11: R = measure_Hoffdiag_corr(); break; + case 12: R = measure_Hoffdiag_int1(); break; + case 13: R = measure_Hoffdiag_int2(); break; + case 14: R = measure_Hoffdiag_int3(); break; + } + return R; +} + +void measure(){ + double R, sgn; int i; + currWeight = GetWeight(); sgn = currWeight.sgn(); + meanq += q; if(maxq < q) maxq = q; in_bin_sum_sgn += sgn; + if((measurement_step+1) % bin_length == 0){ + in_bin_sum_sgn /= bin_length; bin_mean_sgn[measurement_step/bin_length] = in_bin_sum_sgn; in_bin_sum_sgn = 0; + } + for(i=0;i +#include +#include +#include + +double* invPowersOf2 {NULL}; +const int maxexp = 100000; +const int extralen = 10; + +template T min (T a, T b) { return (b>=a)?a:b;} +template T max (T a, T b) { return 
(b>=a)?b:a;} + +class ExExFloat{ +private: + double mantissa; + int exponent; +public: + ExExFloat(){ mantissa = 0.5; exponent = 1;} + void normalize(){ int tmp; mantissa = frexp(mantissa,&tmp); exponent += tmp;} + void init_expmu(double mu){ double e = mu*1.4426950408889634; exponent = ceil(e); mantissa = pow(2.,e - ceil(e)); } + void print(){ + double exp10, m; + exp10 = (exponent*0.30102999566398114); + m = mantissa*pow(10,exp10 - floor(exp10)); + exp10 = floor(exp10); if(fabs(m)<1){ exp10--; m*=10; } + if((exp10<7)&&(exp10>-7)) printf("%.17f",m*pow(10,exp10)); else printf("%.17fe%.0f",m,exp10); + } + ExExFloat operator =(ExExFloat const &obj){ mantissa = obj.mantissa; exponent = obj.exponent; return *this;} + ExExFloat operator =(double const &obj){ mantissa = obj; exponent = 0; normalize(); return *this;} + ExExFloat operator +(ExExFloat const &obj){ // important restriction: it is assumed here that each of the summands is not equal to zero + ExExFloat res; + if(obj.exponent >= exponent){ + res.mantissa = obj.mantissa + mantissa*invPowersOf2[obj.exponent - exponent]; + res.exponent = obj.exponent; res.normalize(); + } else{ + res.mantissa = mantissa + obj.mantissa*invPowersOf2[exponent - obj.exponent]; + res.exponent = exponent; res.normalize(); + } + return res; + } + ExExFloat operator -(ExExFloat const &obj){ // important restriction: it is assumed here that each of the summands is not equal to zero + ExExFloat res; + if(obj.exponent >= exponent){ + res.mantissa = mantissa*invPowersOf2[obj.exponent - exponent] - obj.mantissa; + res.exponent = obj.exponent; res.normalize(); + } else{ + res.mantissa = mantissa - obj.mantissa*invPowersOf2[exponent - obj.exponent]; + res.exponent = exponent; res.normalize(); + } + return res; + } + ExExFloat operator +=(ExExFloat const &obj){ // important restriction: it is assumed here that each of the summands is not equal to zero + if(obj.exponent >= exponent){ + mantissa = obj.mantissa + mantissa*invPowersOf2[obj.exponent - exponent]; + exponent = obj.exponent; normalize(); + } else{ + mantissa = mantissa + obj.mantissa*invPowersOf2[exponent - obj.exponent]; + exponent = exponent; normalize(); + } + return *this; + } + ExExFloat operator -=(ExExFloat const &obj){ // important restriction: it is assumed here that each of the summands is not equal to zero + if(obj.exponent >= exponent){ + mantissa = mantissa*invPowersOf2[obj.exponent - exponent] - obj.mantissa; + exponent = obj.exponent; normalize(); + } else{ + mantissa = mantissa - obj.mantissa*invPowersOf2[exponent - obj.exponent]; + exponent = exponent; normalize(); + } + return *this; + } + ExExFloat operator *(ExExFloat const &obj){ + ExExFloat res; res.mantissa = mantissa * obj.mantissa; res.exponent = exponent + obj.exponent; + res.normalize(); return res; + } + ExExFloat operator /(ExExFloat const &obj){ + ExExFloat res; res.mantissa = mantissa / obj.mantissa; + res.exponent = exponent - obj.exponent; res.normalize(); return res; + } + ExExFloat operator *(double const &obj){ ExExFloat res; res.mantissa = mantissa * obj; res.exponent = exponent; res.normalize(); return res; } + ExExFloat operator /(double const &obj){ ExExFloat res; res.mantissa = mantissa / obj; res.exponent = exponent; res.normalize(); return res; } + ExExFloat operator *=(ExExFloat const &obj){ mantissa *= obj.mantissa; exponent += obj.exponent; normalize(); return *this;} + ExExFloat operator /=(ExExFloat const &obj){ mantissa /= obj.mantissa; exponent -= obj.exponent; normalize(); return *this;} + ExExFloat operator 
*=(double const &obj){ mantissa *= obj; normalize(); return *this;} + ExExFloat operator /=(double const &obj){ mantissa /= obj; normalize(); return *this;} + int operator >=(double const &r){ // important restriction: it is assumed here that both values of mantissa are not negative + if(r == 0) return (mantissa >= 0); + else{ + ExExFloat R; R = r; + if(exponent > R.exponent ) return 1; + else if((exponent == R.exponent)&&(mantissa >= R.mantissa)) return 1; + else return 0; + } + } + int operator >=(ExExFloat const &r){ // important restriction: it is assumed here that both values of mantissa are not negative + if(r.mantissa == 0) return (mantissa >= 0); + else{ + if(exponent > r.exponent ) return 1; + else if((exponent == r.exponent)&&(mantissa >= r.mantissa)) return 1; + else return 0; + } + } + double get_double(){ return ldexp(mantissa,exponent);} + int sgn(){ return (mantissa > 0.0) - (mantissa < 0.0);} + ExExFloat abs(){ExExFloat res; res.mantissa = fabs(mantissa); res.exponent = exponent; return res;} + ExExFloat SqRt(){ ExExFloat res; + if(exponent%2 == 0){ res.mantissa = sqrt(mantissa); res.exponent = exponent/2;} + else{ res.mantissa = sqrt(2*mantissa); res.exponent = (exponent-1)/2;} + res.normalize(); return res; + } +}; + +void divdiff_init(){ + invPowersOf2 = new double[maxexp]; + double curr=1; for(int i=0;i s; +} + +void AddElement(double znew, int force_s = 0, double force_mu = 0){ + int j,k,n,N; ExExFloat curr; n = CurrentLength; N = maxlen+extralen; z[n] = znew; CurrentLength++; + if(CurrentLength==1){ + s = (force_s == 0) ? 1 : force_s; + mu = (force_mu == 0) ? z[0] : force_mu; + expmu.init_expmu(mu); + h[0] = 1; for(k=1;k<=N;k++) h[k] = h[k-1]/s; + if(mu != z[0]) for(k=N;k>0;k--) h[k-1] += h[k]*(z[0]-mu)/k; + curr = expmu*h[0]; for(k=0;k=maxlen)) AddAll(CurrentLength, force_s); + else{ + for(k=N;k>n;k--) h[k-1] += h[k]*(z[n]-mu)/k; curr = expmu*h[n]; + for(k=n;k>=1;k--) h[k-1] = (h[k-1]*n + h[k]*(z[n]-z[n-k]))/(n-k+1); + for(k=0;k=1){ + n = CurrentLength - 1; N = maxlen+extralen; + for(k=1;k<=n;k++) h[k-1] = (h[k-1]*(n-k+1) - h[k]*(z[n]-z[n-k]))/n; + for(k=n+1;k<=N;k++) h[k-1] -= h[k]*(z[n]-mu)/k; + CurrentLength--; + } +} + +int RemoveValue(double value, int force_s = 0, double force_mu = 0){ // remove from bulk + int j,k,n = CurrentLength - 1,found = 0; + for(k=n;k>=0;k--) if(z[k] == value){ + for(j=n;j>=k;j--) RemoveElement(); + for(j=k;j smax)||(len >= maxlen)){ + i = maxlen; Backupz(i); FreeMem(); + if(s > smax) smax = max(smax*2,s); + if(len >= maxlen) maxlen = max(maxlen*2,len); + AllocateMem(); Restorez(i); + } + AddElement(z[0],s,mean(z,len)); + for(i=1;i 0: + u.set_as_random(l, eps, seed=seed) + uh0 = h0.conjugate(u) + uh1 = h1.conjugate(u) + uh = uh0 + lam * uh1 + + # ============================================== + # Running PMR QMC + # ============================================== + # save Hamiltonian file and O.txt file + with open("../H.txt", 'w') as f: + f.write(uh.to_pmr_str()) + with open("../O.txt", 'w') as f: + f.write(uh1.to_pmr_str()) + param_str = make_no_stand_param_fstr(seed, beta, 0.0, Tsteps, steps, stepsPerMeasurement) + with open("../parameters.hpp", "w") as f: + f.write(param_str) + # compile and run PMR-QMC + job_file = "job_file.sh" + temp_out_file = "temp_fidsus_data_zhang_prl_rot_model.txt" + with open("../" + job_file, 'w') as fh: + fh.write("#!/bin/bash\n") + fh.write("g++ -O3 -std=c++11 -o prepare.bin prepare.cpp\n") + fh.write("./prepare.bin H.txt $(ls O.txt O.txt O.txt O.txt 2> /dev/null)\n") + #fh.write("./prepare.bin H.txt\n") 
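        # (Editorial note:) the shell fragment "$(ls O.txt O.txt O.txt O.txt 2> /dev/null)"
        # expands to "O.txt" repeated once per listed name when the operator file exists,
        # and to nothing (with the error silenced) when it does not, so prepare.bin still
        # runs, just without custom observables, if O.txt is missing. The repetition
        # appears intended to register the same operator several times as separate
        # custom observables.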
+ fh.write("g++ -O3 -std=c++11 -o PMRQMC.bin PMRQMC.cpp\n") + fh.write(f"./PMRQMC.bin > {temp_out_file}") + print(f"Trying to submit: {job_file}") + subprocess.run(f"cd ..; chmod +x {job_file}; ./{job_file}", shell=True) + #subprocess.run(["bash", job_file]) + print("Success?") + # ============================================== + # Process and save output + # ============================================== + emergent, obs, std, time = parse_fidsus_temp("../" + temp_out_file) + #----- form file ------ + fname = f"fidsus_data_zhang_prl_rot_curve_{strnow}.csv" + # if file does not exit, open and add header + if not os.path.exists(fname): + with open(fname, "w") as f: + header1 = "n,lam,beta,H1,H1_std," + header2 = "int1,int1_std," + header3 = "int2,int2_std," + header4 = "int3,int3_std," + header5 = "rng,eps,l,Tsteps,steps,stepsPerMeasurement," + header6 = "sign,sign_std,q,qmax,time" + f.write(header1 + header2 + header3 + header4 + header5 + header6) + # always append + with open(fname, "a+") as f: + # save plot param inputs + line1 = f"\n{n},{lam},{beta},{obs[0]},{std[0]}," + line2 = f"{obs[1]},{std[1]}," + line3 = f"{obs[2]},{std[2]}," + line4 = f"{obs[3]},{std[3]}," + line5 = f"{seed},{eps},{l},{Tsteps},{steps},{stepsPerMeasurement}," + line6 = f"{emergent[0]},{emergent[1]},{emergent[2]},{emergent[3]},{time}" + f.write(line1 + line2 + line3 + line4 + line5 + line6) + + return fname +# %% + +# %% +if __name__=="__main__": + # ==================================== + # Header part of main + # ==================================== + # arg parse which experiment to do (see below) + experiment = int(sys.argv[1]) + # get current date-time + now = datetime.datetime.now() + strnow = now.strftime("%Y-%m-%d_%H-%M-%S") + # get rng seeds + with open("../utils/rng_seeds.txt", "r") as f: + seeds = f.readline().split(',') + seeds = [int(s) for s in seeds] + # ==================================== + # Experiments one can run + # ==================================== + # ********************* + # example experiment + # ********************* + if experiment == 0: + n = 2 + rng = seeds[0] + lam = 0.1 + beta = 0.25 + main(n, rng, lam, beta, strnow) + # ********************* + # Fig. 1 experiment + # ********************* + elif experiment == 1: + n = 2 + for rng in seeds[0:5]: + for beta in [5.0, 10.0, 15.0, 20.0]: + for lam in np.linspace(-1.5, 1.5, 200): + main(n, rng, lam, beta, strnow, l = 0) + # ********************* + # Fig. 2 (c),(d) experiments + # ********************* + elif experiment == 2: + # get rng seeds that had high sign + n = 100 + beta = 20.0 + high_sign_seeds = [3026438146, 2781301355, 4206712692, 2270171661, 2834120170, 1370102282, 2172515818, 185933287, 4109413259, 3629902865] + for rng in high_sign_seeds: + for lam in np.linspace(-1.5, 1.5, 200): + main(n, rng, lam, beta, strnow) + # %% diff --git a/mainqmc.hpp b/mainqmc.hpp new file mode 100644 index 0000000..b185314 --- /dev/null +++ b/mainqmc.hpp @@ -0,0 +1,1107 @@ +// +// This program implements estimators for the titular quantities in the preprint: +// Nic Ezzell, Lev Barash, Itay Hen, Exact and universal quantum Monte Carlo estimators for energy susceptibility and fidelity susceptibility. +// +// This work augments the base code whose information is below. +// +// +// This program implements Permutation Matrix Representation Quantum Monte Carlo for arbitrary spin-1/2 Hamiltonians. 
+// +// This program is introduced in the paper: +// Lev Barash, Arman Babakhani, Itay Hen, A quantum Monte Carlo algorithm for arbitrary spin-1/2 Hamiltonians, Physical Review Research 6, 013281 (2024). +// +// This program is licensed under a Creative Commons Attribution 4.0 International License: +// http://creativecommons.org/licenses/by/4.0/ +// +// ExExFloat datatype and calculation of divided differences are described in the paper: +// L. Gupta, L. Barash, I. Hen, Calculating the divided differences of the exponential function by addition and removal of inputs, Computer Physics Communications 254, 107385 (2020) +// + +#include +#include +#include +#include +#include +#include +#include +#include"divdiff.hpp" +#include"hamiltonian.hpp" // use a header file, which defines the Hamiltonian and the custom observables +#include"parameters.hpp" // parameters of the simulation such as the number of Monte-Carlo updates + +#define measurements (steps/stepsPerMeasurement) + +#ifdef EXHAUSTIVE_CYCLE_SEARCH + #define rmin 0 // length r of sub-sequence is chosen randomly between rmin and rmax + #define rmax cycle_max_len + #define lmin r // cycle lengths must be between lmin and lmax + #define lmax cycle_max_len +#else + #define rmin (cycle_min_len-1)/2 + #define rmax (cycle_max_len+1)/2 + #define lmin 2*r-1 + #define lmax 2*r+1 +#endif + +static std::random_device rd; +static std::mt19937 rng; +static std::uniform_int_distribution<> dice2(0,1); +static std::uniform_int_distribution<> diceN(0,N-1); +static std::uniform_int_distribution<> diceNop(0,Nop-1); +static std::uniform_real_distribution<> val(0.0,1.0); +static std::geometric_distribution<> geometric_int(0.8); + +ExExFloat beta_pow_factorial[qmax]; // contains the values (-beta)^q / q! +ExExFloat beta_div2_pow_factorial[qmax]; // contains the values (-beta/2)^q / q! +ExExFloat tau_pow_factorial[qmax]; // contains the values (-tau)^q / q! +ExExFloat tau_minus_beta_pow_factorial[qmax]; // contains the values (tau-beta)^q / q! +double factorial[qmax]; // contains the values q! +int cycle_len[Ncycles]; +int cycles_used[Ncycles]; +int n_cycles[Nop+3]; // numbers of cycles of lengths 0,1,2,...,Nop+2, the last three values are always zeros. 
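// ---- Editorial illustration (not part of the original patch) ----
// The comments above state what the lookup tables hold, e.g.
// beta_pow_factorial[q] = (-beta)^q / q!. A minimal sketch of the intended
// recurrence, written in ExExFloat arithmetic so that large q cannot overflow
// a double. The actual initialization is performed by init() further below;
// this helper (an editorial name, never called) only documents the idea:
static void example_fill_power_tables(){
	ExExFloat curr; curr = 1.0; double fact = 1.0;   // q = 0: (-beta)^0/0! = 1 and 0! = 1
	beta_pow_factorial[0] = curr; factorial[0] = fact;
	for(int k = 1; k < qmax; k++){
		curr *= (-beta)/k;               // (-beta)^k/k! = previous term * (-beta)/k
		beta_pow_factorial[k] = curr;
		fact *= k; factorial[k] = fact;  // q! stored as a plain double, as declared above
	}
}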
+int cycle_min_len, cycle_max_len, found_cycles, min_index, max_index; + +#ifndef MEASURE_CUSTOM_OBSERVABLES +#define Nobservables 0 +#endif + +const int N_all_observables = Nobservables + 15; +int valid_observable[N_all_observables]; + +int bin_length = measurements / Nbins; +double in_bin_sum[N_all_observables]; +double bin_mean[N_all_observables][Nbins]; +double in_bin_sum_sgn; +double bin_mean_sgn[Nbins]; + +int q; +int qmax_achieved=0; + +divdiff* d; // primary QMC divided difference of [-\beta Ez_0,...,-\beta Ez_q] +divdiff* dfs; // QMC divided difference for fidsus [(-\beta/2)Ez_0,...,(-\beta/2)Ez_q] +divdiff* ds1; // a scratch divided diff for measurements +divdiff* ds2; // a scratch divided diff for measurements + +std::bitset lattice; +std::bitset z; +std::bitset P; +std::bitset Pk, Pl; + +int Sq[qmax]; // list of q operator indices +int Sq_backup[qmax]; +int Sq_subseq[qmax]; +int Sq_gaps[qmax]; +double Energies[qmax+1]; +double Energies_backup[qmax+1]; +int eoccupied[qmax+1]; +double currEnergy; +std::complex old_currD, currD, currMDk, currMDl, currMD0; +std::complex currD_partial[qmax], currMDk_trace[qmax], currMDl_trace[qmax], currMD0_trace[qmax]; + +ExExFloat one, currWeight; +unsigned long long step; +unsigned long long measurement_step; + +double CalcEnergy(){ // calculate the energy of a given configuration of spins + std::complex sum = 0; + for(int i=0;i calc_d(int k){ // calculate d_k = for the current configuration of spins + std::complex sum = 0; + for(int i=0;iCurrentLength=0; GetEnergies(); + for(int i=0;i<=q;i++) d->AddElement(-beta*Energies[i]); + return d->divdiffs[q] * beta_pow_factorial[q] * currD.real(); +} + +ExExFloat UpdateWeight(){ + int i, j, notfound, n=d->CurrentLength; double value; + GetEnergies(); memset(eoccupied,0,(q+1)*sizeof(int)); + for(i=0;iz[i]; + for(j=0;j<=q;j++) if(eoccupied[j]==0 && value == -beta*Energies[j]){ notfound = 0; break; } + if(notfound) break; eoccupied[j] = 1; + } + if(i==0) d->CurrentLength=0; else while(n>i){ d->RemoveElement(); n--; } + j=0; while(i<=q){ while(eoccupied[j]) j++; d->AddElement(-beta*Energies[j++]); i++; } + return d->divdiffs[q] * beta_pow_factorial[q] * currD.real(); +} + +ExExFloat UpdateWeightReplace(double removeEnergy, double addEnergy){ + if(removeEnergy != addEnergy) if(d->RemoveValue(-beta*removeEnergy)) d->AddElement(-beta*addEnergy); else{ + std::cout << "Error: energy not found" << std::endl; exit(1); + } + return d->divdiffs[q] * beta_pow_factorial[q] * currD.real(); // use this value only when the values of q and currD are correct +} + +ExExFloat UpdateWeightDel(double removeEnergy1, double removeEnergy2){ + if(d->RemoveValue(-beta*removeEnergy1) && d->RemoveValue(-beta*removeEnergy2)) + return d->divdiffs[q] * beta_pow_factorial[q] * currD.real(); // use this value only when the values of q and currD are correct + else{ + std::cout << "Error: energy not found" << std::endl; exit(1); + } +} + +ExExFloat UpdateWeightIns(double addEnergy1, double addEnergy2){ + d->AddElement(-beta*addEnergy1); d->AddElement(-beta*addEnergy2); + return d->divdiffs[q] * beta_pow_factorial[q] * currD.real(); // use this value only when the values of q and currD are correct +} + +int NoRepetitionCheck(int* sequence, int r){ // check for absence of repetitions in a sequence of length r + int i,j,rep = 1; + for(i=0;ilmax) continue; + not_contained = 0; for(j=0;j0 ? found_cycle_list[int(val(rng)*found_cycles)] : -1; // returns one of the found cycles chosen randomly. 
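	// (Editorial note:) the cycle index returned above appears to feed the
	// "fundamental cycle completion" update in update() below: a randomly chosen
	// subsequence of the operator string Sq is exchanged for the remaining
	// operators of the selected cycle, which keeps the product of permutation
	// operators equal to the identity and hence preserves a nonzero weight.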
+} + +//unsigned int rng_seed; + +void init(){ + int i; double curr2=1; ExExFloat curr1; beta_pow_factorial[0] = curr1; factorial[0] = curr2; + ExExFloat curr_tau; ExExFloat curr_bmt; ExExFloat curr_beta2; + for(q=1;q=0;i--) if(dice2(rng)) lattice.set(i); z = lattice; q=0; + currWeight = GetWeight(); + for(i=0;i0 ? val(rng) : 1; ExExFloat newWeight; + if(v < 0.3){ //local move + if(v<0.1 && q>=2){ // attempt to swap Sq[m] and Sq[m+1] + m = int(val(rng)*(q-1)); // m is between 0 and (q-2) + if(Sq[m]!=Sq[m+1]){ + oldE = Energies[m+1]; old_currD = currD; + p = Sq[m]; Sq[m] = Sq[m+1]; Sq[m+1] = p; GetEnergies(); newWeight = UpdateWeightReplace(oldE,Energies[m+1]); + if(val(rng) < Metropolis(newWeight)) currWeight = newWeight; else { + Sq[m+1] = Sq[m]; Sq[m] = p; currD = old_currD; + UpdateWeightReplace(Energies[m+1],oldE); Energies[m+1] = oldE; + } + } + } else if(v<0.2){ // attempt to delete Sq[m] and Sq[m+1] + if(q>=2){ + m = int(val(rng)*(q-1)); // m is between 0 and (q-2) + if(Sq[m]==Sq[m+1]){ + oldE = Energies[m]; oldE2 = Energies[m+1]; old_currD = currD; + memcpy(Sq_backup,Sq,q*sizeof(int)); memcpy(Energies_backup,Energies,(q+1)*sizeof(double)); + for(i=m;i=m;i--) Sq[i+2] = Sq[i]; q+=2; Sq[m] = Sq[m+1] = p; + GetEnergies(); newWeight = UpdateWeightIns(Energies[m],Energies[m+1]); + if(val(rng) < Metropolis(newWeight)) currWeight = newWeight; else{ + q-=2; memcpy(Sq,Sq_backup,q*sizeof(int)); memcpy(Energies,Energies_backup,(q+1)*sizeof(double)); + currD = old_currD; d->RemoveElement(); d->RemoveElement(); + } + } else qmax_achieved = 1; + } else if(v < 0.5){ // attempting a fundamental cycle completion + int oldq, j = 0, inv_pr; double wfactor; + u = geometric_int(rng); // a random integer u is picked according to geometric distribution + if(q >= u+rmin){ + inv_pr = min(rmax,q-u)-(rmin)+1; + r = int(val(rng)*inv_pr) + (rmin); // r is random integer between rmin and min(rmax,q-u) + PickSubsequence(r+u); // indexes of the subsequence are min_index, min_index+1,..., max_index=min_index+r+u-1 + std::shuffle(Sq_subseq,Sq_subseq+r+u,rng); + if(NoRepetitionCheck(Sq_subseq,r)){ + for(i=0;i 0){ // cycles[m] is one of the found cycles, containing all the operators of Sq_subseq + P = cycles[m]; for(i=0;imax_index;i--) Sq[i+p-r] = Sq[i]; // shift the values to the right + else if(r>p) for(i=max_index+1;i=2){ // attempting a block swap + m = q==2 ? 
0 : int(val(rng)*(q-1)); // m is between 0 and (q-2) + oldE = currEnergy; for(i=0;i<=m;i++) ApplyOperator(Sq[i]); + for(i=0;i<=m;i++) Sq_backup[q-1-m+i] = Sq[i]; + for(i=m+1;i calc_MD0(int n){ // calculate for the current configuration of spins and observable n + std::complex sum = 0; + for(int i=0;i og_lattice = lattice; + currMD0 = currMD0_trace[0] = calc_MD0(n); + for(int i=0;i calc_MD(int n, int k){ // calculate d_k = for the current configuration of spins and observable n + std::complex sum = 0; + for(int i=0;i og_lattice = lattice; + currMDk = currMDk_trace[0] = 1; + for(int i=0;iz[q]/(-beta); //z[q] = (-beta) * E_{z_q} + if(q > 0) R += (d->divdiffs[q-1]/d->divdiffs[q]).get_double()*q/(-beta); + return R; +} + +double measure_H2(){ + double R = (d->z[q]/(-beta))*(d->z[q]/(-beta)); + if(q>0) R += (d->z[q]/(-beta) + d->z[q-1]/(-beta))*(d->divdiffs[q-1]/d->divdiffs[q]).get_double()*q/(-beta); + if(q>1) R += (d->divdiffs[q-2]/d->divdiffs[q]).get_double()*(q*(q-1))/(-beta)/(-beta); + return R; +} + +double measure_Hdiag(){ + return currEnergy; +} + +double measure_Hdiag2(){ + return currEnergy*currEnergy; +} + +double measure_Hoffdiag(){ + double R = 0; + if(q > 0) R += (d->divdiffs[q-1]/d->divdiffs[q]).get_double()*q/(-beta); + return R; +} + +double measure_Hoffdiag2(){ + double R = (d->z[q]/(-beta))*(d->z[q]/(-beta)) + currEnergy*(currEnergy - 2*measure_H()); + if(q>0) R += (d->z[q]/(-beta) + d->z[q-1]/(-beta))*(d->divdiffs[q-1]/d->divdiffs[q]).get_double()*q/(-beta); + if(q>1) R += (d->divdiffs[q-2]/d->divdiffs[q]).get_double()*(q*(q-1))/(-beta)/(-beta); + return R; +} + +double measure_Hdiag_corr(){ + double R = (d->z[0]/(-beta))/(d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + ds1->CurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + // init ds2 with full divided difference + double tot_num = 0; + for(int j = q; j >= 0; j--) ds2->AddElement((tau - beta)*(d->z[j]/(-beta))); + for(int j = 0; j <= q; j++) { + ds1->AddElement(-tau*(d->z[j]/(-beta))); + tot_num += (d->z[j]/(-beta))*((ds1->divdiffs[j]*tau_pow_factorial[j])*(ds2->divdiffs[q-j]*tau_minus_beta_pow_factorial[q-j])).get_double(); + ds2->RemoveElement(); + } + R *= tot_num; + return R; +} + +double measure_Hoffdiag_corr(){ + double R = measure_H2(); + R += measure_Hdiag_corr(); + R -= 2.0 * (d->z[0]/(-beta)) * measure_H(); + return R; +} + +double measure_Hdiag_int1(){ + double R = -(d->z[0]/(-beta))/(d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + double tot_numerator = 0; + for(int j = 0; j <= q; j++) { + d->AddElement(-beta*(d->z[j]/(-beta))); + tot_numerator += (d->z[j]/(-beta))*(d->divdiffs[q+1]*beta_pow_factorial[q+1]).get_double(); + d->RemoveElement(); + } + R *= tot_numerator; + return R; +} + +double measure_Hoffdiag_int1(){ + double R = beta * measure_H2(); + R += measure_Hdiag_int1(); + R -= 2.0 * beta * (d->z[0]/(-beta)) * measure_H(); + return R; +} + +double measure_Hdiag_int2(){ + double R = 0.0; + double prefac = -(d->z[0]/(-beta))/(d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + double Ezj, Ezu; + for(int j = 0; j <= q; j++) { + Ezj = d->z[j]/(-beta); + d->AddElement(-beta*Ezj); + R += prefac*Ezj*beta*(d->divdiffs[q+1]*beta_pow_factorial[q+1]).get_double(); + d->AddElement(0); + R += prefac*Ezj*(j+1.0)*(d->divdiffs[q+2]*beta_pow_factorial[q+2]).get_double(); + for(int u = 0; u <= j; u++){ + Ezu = d->z[u]/(-beta); + d->AddElement(-beta*Ezu); + R += prefac*Ezj*Ezu*(d->divdiffs[q+3]*beta_pow_factorial[q+3]).get_double(); + d->RemoveElement(); + } + d->RemoveElement(); + 
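		// (Editorial note:) the RemoveElement() just above and the one just below
		// undo, in reverse order, the AddElement(0) and AddElement(-beta*Ezj) calls
		// made earlier in this j-iteration, restoring d to its original q+1 inputs
		// before the next value of j is processed.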
d->RemoveElement(); + } + return R; +} + +double measure_Hoffdiag_int2(){ + double R = (beta * beta * measure_H2()) / 2.0; + R += measure_Hdiag_int2(); + R -= beta * beta * (d->z[0]/(-beta)) * measure_H(); + return R; +} + +double measure_Hdiag_int3(){ + double R = -(d->z[0]/(-beta))/(d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + double tot_numerator = 0; + double factor = 0; + for(int j = 0; j <= q; j++) { + // reset and init divdiffs + ds1->CurrentLength=0; ds2->CurrentLength=0; + for(int k = q; k >= j; k--) ds2->AddElement((-beta/2)*(d->z[k]/(-beta))); + ds2->AddElement((-beta/2)*(d->z[j]/(-beta))); + for(int k = j-1; k >= 0; k--) ds2->AddElement((-beta/2)*(d->z[k]/(-beta))); + // sum over r to accumulate contribution for fixed j + for(int r = 0; r <= j; r++) { + ds1->AddElement((-beta/2)*(d->z[r]/(-beta))); + factor = (d->z[j]/(-beta))*(ds1->divdiffs[r]*beta_div2_pow_factorial[r]).get_double(); + tot_numerator += (factor*(beta/2)*(ds2->divdiffs[q+1-r]*beta_div2_pow_factorial[q+1-r]).get_double()); + ds2->AddElement(0); + tot_numerator += (factor*(j-r+1)*(ds2->divdiffs[q+2-r]*beta_div2_pow_factorial[q+2-r]).get_double()); + for(int u = r; u <= j; u++){ + ds2->AddElement((-beta/2)*(d->z[u]/(-beta))); + tot_numerator += (factor*(d->z[u]/(-beta))*(ds2->divdiffs[q+3-r]*beta_div2_pow_factorial[q+3-r]).get_double()); + ds2->RemoveElement(); + } + ds2->RemoveElement(); + ds2->RemoveElement(); + } + } + R *= tot_numerator; + return R; +} + +double measure_Hoffdiag_int3(){ + double R = (beta * beta * measure_H2()) / 8.0; + R += measure_Hdiag_int3(); + R -= (beta * beta * (d->z[0]/(-beta)) * measure_H()) / 4.0; + return R; +} + +double measure_Z_magnetization(){ + return (2.0*lattice.count() - N)/N; +} + +std::string name_of_observable(int n){ + std::string s; + if(n < Nobservables){ +#ifdef MEASURE_CUSTOM_OBSERVABLES + s = Mnames[n]; +#endif + } else switch(n-Nobservables){ + case 0: s = "H"; break; + case 1: s = "H^2"; break; + case 2: s = "H_{diag}"; break; + case 3: s = "H_{diag}^2"; break; + case 4: s = "H_{offdiag}"; break; + case 5: s = "H_{offdiag}^2"; break; + case 6: s = "Z_magnetization"; break; + case 7: s = "measure_Hdiag_corr"; break; + case 8: s = "measure_Hdiag_int1"; break; + case 9: s = "measure_Hdiag_int2"; break; + case 10: s = "measure_Hdiag_int3"; break; + case 11: s = "measure_Hoffdiag_corr"; break; + case 12: s = "measure_Hoffdiag_int1"; break; + case 13: s = "measure_Hoffdiag_int2"; break; + case 14: s = "measure_Hoffdiag_int3"; break; + } + return s; +} + +double measure_observable(int n){ + double R = 0; + if(valid_observable[n]) if(n < Nobservables){ +#ifdef MEASURE_CUSTOM_OBSERVABLES + int i,k,len,cont,l,lenk,lenl,j,t,r; + std::complex T = 0; + std::complex A0z = calc_MD0(n); + std::complex Akz = 0.0; + std::complex prefac = 0.0; + std::complex inner_prefac = 0.0; + switch(n){ + // measures + case 0: + T += A0z; // add contribution of diagonal of custom O + for(k=0;kq) continue; + // cannot have repeats in Sq[q-k+1],Sq[q-k+2],...,Sq[q] or cannot be making marked P + // because indexing from 0, actually checking Sq[q-k],Sq[q-k+1],...,Sq[q-1] + if(!NoRepetitionCheck(Sq+(q-len),len)) continue; + // Sq[q-k] * Sq[q-k+1] * ... * Sq[q-1] == P (i.e. 
each Sq[q-1-i] is in P is what test is doing) + cont = 0; for(i=0;idivdiffs[q-len]/d->divdiffs[q]).get_double() * + (beta_pow_factorial[q-len]/beta_pow_factorial[q]).get_double()/factorial[len] * + (currD_partial[q-len]/currD) * calc_MD(n,k); // calc_MD(n,k) gives \tilde{A}(z) + // currD_parital[q-len]/currD is be ratio in eq.32 + } + R = (currD*T).real()/currD.real(); // we importance-sample Re(W_C A_C)/Re(W_C) + break; + // measures + case 4: + T += A0z*A0z; // diag/diag component + // compute (k=0, l) contribution + for(l=0;lq) continue; // need lenl > q + if(!NoRepetitionCheck(Sq+(q-lenl),lenl)) continue; // check {Sq[q-lenl], ..., Sq[q-1]} no repeats + // checks that Sq[q-1-i] \in P_l for i=0, i=lenl-1. + cont = 0; for(i=0;idivdiffs[q-lenl]/d->divdiffs[q]).get_double() * + (beta_pow_factorial[q-lenl]/beta_pow_factorial[q]).get_double()/(factorial[lenl]) * + (currD_partial[q-lenl]/currD); + } + for(k=0;kq) continue; + if(!NoRepetitionCheck(Sq+(q-lenk),lenk)) continue; + cont = 0; for(i=0;idivdiffs[q-lenk]/d->divdiffs[q]).get_double() * + (beta_pow_factorial[q-lenk]/beta_pow_factorial[q]).get_double()/factorial[lenk] * + (currD_partial[q-lenk]/currD); + // compute (k, l) contributions + for(l=0;lq) continue; // need lenk + lenl > q + if(!NoRepetitionCheck(Sq+(q-lenk-lenl),lenl)) continue; // check {Sq[q-lenk-lenl], ..., Sq[q-1-lenk]} no repeats + // checks that Sq[q-1-lenk-i] \in P_l for i=0, i=lenl-1. + cont = 0; for(i=0;idivdiffs[q-lenk-lenl]/d->divdiffs[q]).get_double() * + (beta_pow_factorial[q-lenk-lenl]/beta_pow_factorial[q]).get_double()/(factorial[lenk]*factorial[lenl]) * + (currD_partial[q-lenk-lenl]/currD); + } + } + R = (currD*T).real()/currD.real(); + break; + // measures + case 5: + // add (k=0, l=0) pure diagonal contribution with Leibniz rule (like Hdiag_corr) + calc_MD0_trace(n); + prefac = A0z / (d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + ds1->CurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + for(int j = q; j >= 0; j--) ds2->AddElement((tau - beta)*(d->z[j]/(-beta))); // init ds2 + for(int j = 0; j <= q; j++) { + ds1->AddElement(-tau*(d->z[j]/(-beta))); + T += prefac*currMD0_trace[j]*((ds1->divdiffs[j]*tau_pow_factorial[j])*(ds2->divdiffs[q-j]*tau_minus_beta_pow_factorial[q-j])).get_double(); + ds2->RemoveElement(); + } + // add (k=0, l) cross term contribution + prefac = A0z / ((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + ds1->CurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + for(int j = q; j >= lenl; j--) ds2->AddElement((-tau)*(d->z[j]/(-beta))); // init ds2 + ds2->AddElement(0); // add dummy value for below continue logic to hold + for (int j = lenl; j <= q; j++){ + ds2->RemoveElement(); + ds1->AddElement((-beta+tau)*(d->z[j-lenl]/(-beta))); + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;idivdiffs[j-lenl]*tau_minus_beta_pow_factorial[j-lenl]).get_double() * + (ds2->divdiffs[q-j]*tau_pow_factorial[q-j]).get_double()/(factorial[lenl]) * + (currD_partial[j-lenl]/currD_partial[j]); + } + } + // add (k, l=0) and (k, l) contributions + for(k=0;kq) continue; + if(!NoRepetitionCheck(Sq+(q-lenk),lenk)) continue; + cont = 0; for(i=0;idivdiffs[q]*beta_pow_factorial[q]).get_double(); + ds1->CurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + for(int j = q-lenk; j >= 0; j--) ds2->AddElement((-tau)*(d->z[j]/(-beta))); // init ds2 + for(int j = 0; j <= q-lenk; j++){ + 
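					// (Editorial note, inferred from the AddElement/RemoveElement pattern:)
					// ds1 grows from the left with inputs scaled by (tau - beta) while ds2,
					// prefilled with inputs scaled by (-tau), shrinks from the right, so at
					// step j the product ds1->divdiffs[j] * ds2->divdiffs[q-lenk-j] appears
					// to realize the imaginary-time split of the weight between positions j
					// and j+1, which is the structure the tau-dependent correlator estimator
					// relies on.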
ds1->AddElement((-beta+tau)*(d->z[j]/(-beta))); + T += prefac*currMD0_trace[j]*(ds1->divdiffs[j]*tau_minus_beta_pow_factorial[j]).get_double() * + (ds2->divdiffs[q-lenk-j]*tau_pow_factorial[q-lenk-j]).get_double()/(factorial[lenk]) * + (currD_partial[q-lenk]/currD); + ds2->RemoveElement(); + } + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + // compute (k, l) Lebniz rule contribution + std::complex prefac = Akz / ((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + ds1->CurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + for(int j = q-lenk; j >= lenl; j--) ds2->AddElement((-tau)*(d->z[j]/(-beta))); // init ds2 + ds2->AddElement(0); + for (int j = lenl; j <= q-lenk; j++){ + ds2->RemoveElement(); + ds1->AddElement((-beta+tau)*(d->z[j-lenl]/(-beta))); + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;idivdiffs[j-lenl]*tau_minus_beta_pow_factorial[j-lenl]).get_double() * + (ds2->divdiffs[q-lenk-j]*tau_pow_factorial[q-lenk-j]).get_double()/(factorial[lenk]*factorial[lenl]) * + (currD_partial[j-lenl]/currD) * (currD_partial[q-lenk]/currD_partial[j]); + } + } + + } + R = (currD*T).real()/currD.real(); + break; + // measures \int_0^\beta \dtau + case 1: + // add (k=0, l=0) pure diagonal contribution with Leibniz rule (like Hdiag_int1) + calc_MD0_trace(n); + prefac = -A0z / (d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + for(int j = 0; j <= q; j++) { + d->AddElement(-beta*(d->z[j]/(-beta))); + T += prefac*currMD0_trace[j]*(d->divdiffs[q+1]*beta_pow_factorial[q+1]).get_double(); + d->RemoveElement(); + } + // add (k=0, l) cross term contribution + prefac = -A0z / ((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + ds1->CurrentLength=0; // reset scratch divdiffs + for(int j = q; j >= lenl; j--) ds1->AddElement((-beta)*(d->z[j]/(-beta))); // init ds1 + ds1->AddElement(0); // add dummy value for continue logic + for(int j = lenl; j <= q; j++){ + ds1->RemoveElement(); // remove before check so things work with continue + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;iAddElement((-beta)*(d->z[i]/(-beta))); + // add contribution if relevant + T += prefac*currMDl_trace[j]*(ds1->divdiffs[q-lenl+1]*beta_pow_factorial[q-lenl+1]).get_double() * + (currD_partial[j-lenl]/currD_partial[j])/(factorial[lenl]); + // remove \Lambda_j elements from \Theta_j multiset + for(int i = 0; i <= j-lenl; i++) ds1->RemoveElement(); + ds1->RemoveElement(); + } + } + // add (k, l=0) and (k, l) contributions + for(k=0;kq) continue; + if(!NoRepetitionCheck(Sq+(q-lenk),lenk)) continue; + cont = 0; for(i=0;idivdiffs[q]*beta_pow_factorial[q]).get_double(); + ds1->CurrentLength=0; // reset scratch divdiffs + for(int j = q-lenk; j >= 0; j--) ds1->AddElement((-beta)*(d->z[j]/(-beta))); // init ds1 + for(int j = 0; j <= q-lenk; j++){ + // add \Lambda_j elements to \Theta_j multiset + for(int i = 0; i <= j; i++) ds1->AddElement((-beta)*(d->z[i]/(-beta))); + T += prefac*currMD0_trace[j]*(ds1->divdiffs[q-lenk+1]*beta_pow_factorial[q-lenk+1]).get_double() * + (currD_partial[q-lenk]/currD)/(factorial[lenk]); + // remove \Lambda_j elements from \Theta_j multiset + for(int i = 0; i <= j; i++) ds1->RemoveElement(); + ds1->RemoveElement(); + } + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + // compute (k, l) Lebniz rule contribution + prefac = -Akz / 
((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + ds1->CurrentLength=0; // reset scratch divdiffs + for(int j = q-lenk; j >= lenl; j--) ds1->AddElement((-beta)*(d->z[j]/(-beta))); // init ds1 + ds1->AddElement(0); // add dummy value for continue logic + for (int j = lenl; j <= q-lenk; j++){ + ds1->RemoveElement(); // remove before check so things work with continue + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;iAddElement((-beta)*(d->z[i]/(-beta))); + // add contribution if relevant + T += prefac*currMDl_trace[j]*(ds1->divdiffs[q-lenk-lenl+1]*beta_pow_factorial[q-lenk-lenl+1]).get_double() * + (currD_partial[j-lenl]/currD) * (currD_partial[q-lenk]/currD_partial[j])/(factorial[lenk]*factorial[lenl]); + // remove \Lambda_j elements from \Theta_j multiset + for(int i = 0; i <= j-lenl; i++) ds1->RemoveElement(); + } + } + + } + R = (currD*T).real()/currD.real(); + break; + // measures \int_0^\beta \dtau \tau + case 2: + // add (k=0, l=0) pure diagonal contribution with Leibniz rule (like Hdiag_int2) + calc_MD0_trace(n); + prefac = -A0z / (d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + for(int j = 0; j <= q; j++) { + d->AddElement(-beta*(d->z[j]/(-beta))); + T += prefac*currMD0_trace[j]*beta*(d->divdiffs[q+1]*beta_pow_factorial[q+1]).get_double(); + d->AddElement(0); + T += prefac*currMD0_trace[j]*(j+1.0)*(d->divdiffs[q+2]*beta_pow_factorial[q+2]).get_double(); + for(int u = 0; u <= j; u++){ + d->AddElement(-beta*(d->z[u]/(-beta))); + T += prefac*currMD0_trace[j]*(d->z[u]/(-beta))*(d->divdiffs[q+3]*beta_pow_factorial[q+3]).get_double(); + d->RemoveElement(); + } + d->RemoveElement(); + d->RemoveElement(); + } + // add (k=0, l) cross term contribution + prefac = -A0z / ((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + ds1->CurrentLength=0; // reset scratch divdiffs + for(int j = q; j >= lenl; j--) ds1->AddElement((-beta)*(d->z[j]/(-beta))); // init ds1 + ds1->AddElement(0); // add dummy value for continue logic + for(int j = lenl; j <= q; j++){ + ds1->RemoveElement(); // remove before check so things work with continue + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;iAddElement((-beta)*(d->z[i]/(-beta))); + // add contribution if relevant + T += prefac*currMDl_trace[j]*beta*(ds1->divdiffs[q-lenl+1]*beta_pow_factorial[q-lenl+1]).get_double() * + (currD_partial[j-lenl]/currD_partial[j])/(factorial[lenl]); + ds1->AddElement(0); + T += prefac*currMDl_trace[j]*(j-lenl+1.0)*(ds1->divdiffs[q-lenl+2]*beta_pow_factorial[q-lenl+2]).get_double() * + (currD_partial[j-lenl]/currD_partial[j])/(factorial[lenl]); + for(int u = 0; u <= j-lenl; u++){ + ds1->AddElement(-beta*(d->z[u]/(-beta))); + T += prefac*currMDl_trace[j]*(d->z[u]/(-beta))*(ds1->divdiffs[q-lenl+3]*beta_pow_factorial[q-lenl+3]).get_double() * + (currD_partial[j-lenl]/currD_partial[j])/(factorial[lenl]); + ds1->RemoveElement(); + } + // remove \Lambda_j elements from \Theta_j multiset + for(int i = 0; i <= j-lenl; i++) ds1->RemoveElement(); + ds1->RemoveElement(); + } + } + // add (k, l=0) and (k, l) contributions + for(k=0;kq) continue; + if(!NoRepetitionCheck(Sq+(q-lenk),lenk)) continue; + cont = 0; for(i=0;idivdiffs[q]*beta_pow_factorial[q]).get_double(); + ds1->CurrentLength=0; // reset scratch divdiffs + for(int j = q-lenk; j >= 0; j--) ds1->AddElement((-beta)*(d->z[j]/(-beta))); // init ds1 + for(int j = 
0; j <= q-lenk; j++){ + // add \Lambda_j elements to \Theta_j multiset + for(int i = 0; i <= j; i++) ds1->AddElement((-beta)*(d->z[i]/(-beta))); + T += prefac*currMD0_trace[j]*beta*(ds1->divdiffs[q-lenk+1]*beta_pow_factorial[q-lenk+1]).get_double() * + (currD_partial[q-lenk]/currD)/(factorial[lenk]); + ds1->AddElement(0); + T += prefac*currMD0_trace[j]*(j+1.0)*(ds1->divdiffs[q-lenk+2]*beta_pow_factorial[q-lenk+2]).get_double() * + (currD_partial[q-lenk]/currD)/(factorial[lenk]); + for(int u = 0; u <= j; u++){ + ds1->AddElement(-beta*(d->z[u]/(-beta))); + T += prefac*currMD0_trace[j]*(d->z[u]/(-beta))*(ds1->divdiffs[q-lenk+3]*beta_pow_factorial[q-lenk+3]).get_double() * + (currD_partial[q-lenk]/currD)/(factorial[lenk]); + ds1->RemoveElement(); + } + // remove \Lambda_j elements from \Theta_j multiset + ds1->RemoveElement(); // Remove 0 + for(int i = 0; i <= j; i++) ds1->RemoveElement(); + ds1->RemoveElement(); // Remove from e{-tau[]} multiset + } + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + // compute (k, l) Lebniz rule contribution + prefac = -Akz / ((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + ds1->CurrentLength=0; // reset scratch divdiffs + for(int j = q-lenk; j >= lenl; j--) ds1->AddElement((-beta)*(d->z[j]/(-beta))); // init ds1 + ds1->AddElement(0); // add dummy value for continue logic + for (int j = lenl; j <= q-lenk; j++){ + ds1->RemoveElement(); // remove before check so things work with continue + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;iAddElement((-beta)*(d->z[i]/(-beta))); + // add contribution if relevant + T += prefac*currMDl_trace[j]*beta*(ds1->divdiffs[q-lenk-lenl+1]*beta_pow_factorial[q-lenk-lenl+1]).get_double() * + (currD_partial[j-lenl]/currD) * (currD_partial[q-lenk]/currD_partial[j])/(factorial[lenk]*factorial[lenl]); + ds1->AddElement(0); + T += prefac*currMDl_trace[j]*(j-lenl+1.0)*(ds1->divdiffs[q-lenk-lenl+2]*beta_pow_factorial[q-lenk-lenl+2]).get_double() * + (currD_partial[j-lenl]/currD) * (currD_partial[q-lenk]/currD_partial[j])/(factorial[lenk]*factorial[lenl]); + for(int u = 0; u <= j-lenl; u++){ + ds1->AddElement(-beta*(d->z[u]/(-beta))); + T += prefac*currMDl_trace[j]*(d->z[u]/(-beta))*(ds1->divdiffs[q-lenk-lenl+3]*beta_pow_factorial[q-lenk-lenl+3]).get_double() * + (currD_partial[j-lenl]/currD) * (currD_partial[q-lenk]/currD_partial[j])/(factorial[lenk]*factorial[lenl]); + ds1->RemoveElement(); + } + ds1->RemoveElement(); // remove 0 + // remove \Lambda_j elements from \Theta_j multiset + for(int i = 0; i <= j-lenl; i++) ds1->RemoveElement(); + } + } + + } + R = (currD*T).real()/currD.real(); + break; + // measures \int_0^{\beta/2} \dtau \tau + case 3: + // add (k=0, l=0) pure diagonal contribution with Leibniz rule (like Hdiag_int3) + calc_MD0_trace(n); + prefac = -A0z / (d->divdiffs[q]*beta_pow_factorial[q]).get_double(); + for(int j = 0; j <= q; j++) { + // reset and init divdiffs + ds1->CurrentLength=0; ds2->CurrentLength=0; + for(int i = q; i >= j; i--) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + for(int i = j; i >= 0; i--) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + // sum over r to accumulate contribution for fixed j + for(int r = 0; r <= j; r++) { + ds1->AddElement((-beta/2.0)*(d->z[r]/(-beta))); + inner_prefac = prefac * currMD0_trace[j] * (ds1->divdiffs[r]*beta_div2_pow_factorial[r]).get_double(); + T += (beta/2.0)*inner_prefac*(ds2->divdiffs[q+1-r]*beta_div2_pow_factorial[q+1-r]).get_double(); + ds2->AddElement(0); + T += 
(inner_prefac*(j-r+1.0)*(ds2->divdiffs[q+2-r]*beta_div2_pow_factorial[q+2-r]).get_double()); + for(int u = r; u <= j; u++){ + ds2->AddElement((-beta/2.0)*(d->z[u]/(-beta))); + T += (d->z[u]/(-beta))*inner_prefac*(ds2->divdiffs[q+3-r]*beta_div2_pow_factorial[q+3-r]).get_double(); + ds2->RemoveElement(); + } + ds2->RemoveElement(); + ds2->RemoveElement(); + } + } + // add (k=0, l) cross term contribution + prefac = -A0z / ((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + for (int j = lenl; j <= q; j++){ + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;iCurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + // build e^{-\beta/2 [\Lambda_j]} + for(int i = j; i <= q; i++) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + // build e^{-\beta/2 [\Omega_{j,r}]} (aka add [\overline{\Theta_{j,r}}] to [\Lambda_j] + for(int i = j-lenl; i>=0; i--) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + // sum over r + for(int r = 0; r <= j-lenl; r++){ + // build e^{-\beta/2 [\Theta_{j,r}]} + //for(int i = 0; i <= r; i++) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + ds1->AddElement((-beta/2)*(d->z[r]/(-beta))); + inner_prefac = prefac*(currMDl_trace[j]/factorial[lenl])*(currD_partial[j-lenl]/currD_partial[j])*(ds1->divdiffs[r]*beta_div2_pow_factorial[r]).get_double(); + // add (beta/2)*e^{-beta/2[\Omega_{j,r}]} contribution + T += inner_prefac*(beta/2)*(ds2->divdiffs[q+1-lenl-r]*beta_div2_pow_factorial[q+1-lenl-r]).get_double(); + // add N([\overline{\theta}_{j,r}]) e^{-beta/2[\Omega_{j,r}] + {0}} contribution + ds2->AddElement(0); + T += inner_prefac*(j-lenl-r+1.0)*(ds2->divdiffs[q+2-lenl-r]*beta_div2_pow_factorial[q+2-lenl-r]).get_double(); + // add \sum_u contribution + for(int u = r; u<= j-lenl; u++){ + ds2->AddElement((-beta/2)*(d->z[u]/(-beta))); + T += inner_prefac*(d->z[u]/(-beta))*(ds2->divdiffs[q+3-lenl-r]*beta_div2_pow_factorial[q+3-lenl-r]).get_double(); + ds2->RemoveElement(); + } + ds2->RemoveElement(); // removes AddElement(0) + ds2->RemoveElement(); // removes AddElement((-beta/2)*(d->z[r]/(-beta))); + } + } + } + // add (k, l=0) and (k, l) contributions + for(k=0;kq) continue; + if(!NoRepetitionCheck(Sq+(q-lenk),lenk)) continue; + cont = 0; for(i=0;idivdiffs[q]*beta_pow_factorial[q]).get_double()); + for (int j = 0; j <= q-lenk; j++){ + ds1->CurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + // build e^{-\beta/2 [\Lambda_j]} + for(int i = j; i <= q-lenk; i++) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + // build e^{-\beta/2 [\Omega_{j,r}]} (aka add [\overline{\Theta_{j,r}}] to [\Lambda_j] + for(int i = j; i>=0; i--) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + // sum over r + for(int r = 0; r <= j; r++){ + // build e^{-\beta/2 [\Theta_{j,r}]} + //for(int i = 0; i <= r; i++) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + ds1->AddElement((-beta/2)*(d->z[r]/(-beta))); + inner_prefac = prefac*currMD0_trace[j]*(currD_partial[q-lenk]/currD)*(ds1->divdiffs[r]*beta_div2_pow_factorial[r]).get_double(); + // add (beta/2)*e^{-beta/2[\Omega_{j,r}]} contribution + T += inner_prefac*(beta/2)*(ds2->divdiffs[q+1-lenk-r]*beta_div2_pow_factorial[q+1-lenk-r]).get_double(); + // add N([\overline{\theta}_{j,r}]) e^{-beta/2[\Omega_{j,r}] + {0}} contribution + ds2->AddElement(0); + T += inner_prefac*(j-r+1.0)*(ds2->divdiffs[q+2-lenk-r]*beta_div2_pow_factorial[q+2-lenk-r]).get_double(); + // add \sum_u contribution + for(int u = r; u<= j; u++){ + 
ds2->AddElement((-beta/2)*(d->z[u]/(-beta))); + T += inner_prefac*(d->z[u]/(-beta))*(ds2->divdiffs[q+3-lenk-r]*beta_div2_pow_factorial[q+3-lenk-r]).get_double(); + ds2->RemoveElement(); + } + ds2->RemoveElement(); // removes AddElement(0) + ds2->RemoveElement(); // removes AddElement((-beta/2)*(d->z[r]/(-beta))); + } + } + // compute \sum_l (k, l) Leibniz rule contribution + for(l=0;lq) continue; + calc_MD_trace_l(n ,l); + prefac = -(Akz / factorial[lenk]) / ((d->divdiffs[q]*beta_pow_factorial[q]).get_double()); + for (int j = lenl; j <= q-lenk; j++){ + // remainder of 1^{(j)}_{\tilde{P}_l}(S_{i_q}) condition + if(!NoRepetitionCheck(Sq+(j-lenl),lenl)) continue; + cont = 0; for(i=0;iCurrentLength=0; ds2->CurrentLength=0; // reset scratch divdiffs + // build e^{-\beta/2 [\Lambda_j]} + for(int i = j; i <= q-lenk; i++) ds2->AddElement((-beta/2.0)*(d->z[i]/(-beta))); + // build e^{-\beta/2 [\Omega_{j,r}]} (aka add [\overline{\Theta_{j,r}}] to [\Lambda_j] + for(int i = j-lenl; i>=0; i--) ds2->AddElement((-beta/2.0)*(d->z[i]/(-beta))); + // sum over r + for(int r = 0; r <= j-lenl; r++){ + // build e^{-\beta/2 [\Theta_{j,r}]} + //for(int i = 0; i <= r; i++) ds2->AddElement((-beta/2)*(d->z[i]/(-beta))); + ds1->AddElement((-beta/2.0)*(d->z[r]/(-beta))); + inner_prefac = prefac*(currMDl_trace[j]/factorial[lenl])*(currD_partial[j-lenl]/currD)*(currD_partial[q-lenk]/currD_partial[j]) * + (ds1->divdiffs[r]*beta_div2_pow_factorial[r]).get_double(); + // add (beta/2)*e^{-beta/2[\Omega_{j,r}]} contribution + T += inner_prefac*(beta/2.0)*(ds2->divdiffs[q+1-lenl-lenk-r]*beta_div2_pow_factorial[q+1-lenl-lenk-r]).get_double(); + // add N([\overline{\theta}_{j,r}]) e^{-beta/2[\Omega_{j,r}] + {0}} contribution + ds2->AddElement(0); + T += inner_prefac*(j-lenl-r+1.0)*(ds2->divdiffs[q+2-lenl-lenk-r]*beta_div2_pow_factorial[q+2-lenl-lenk-r]).get_double(); + // add \sum_u contribution + for(int u = r; u<= j-lenl; u++){ + ds2->AddElement((-beta/2.0)*(d->z[u]/(-beta))); + T += inner_prefac*(d->z[u]/(-beta))*(ds2->divdiffs[q+3-lenl-lenk-r]*beta_div2_pow_factorial[q+3-lenl-lenk-r]).get_double(); + ds2->RemoveElement(); + } + ds2->RemoveElement(); // removes AddElement(0) + ds2->RemoveElement(); // removes AddElement((-beta/2)*(d->z[r]/(-beta))); + } + } + } + + } + R = (currD*T).real()/currD.real(); + break; + } +#endif + } else switch(n-Nobservables){ + case 0: R = measure_H(); break; + case 1: R = measure_H2(); break; + case 2: R = measure_Hdiag(); break; + case 3: R = measure_Hdiag2(); break; + case 4: R = measure_Hoffdiag(); break; + case 5: R = measure_Hoffdiag2(); break; + case 6: R = measure_Z_magnetization(); break; + case 7: R = measure_Hdiag_corr(); break; + case 8: R = measure_Hdiag_int1(); break; + case 9: R = measure_Hdiag_int2(); break; + case 10: R = measure_Hdiag_int3(); break; + case 11: R = measure_Hoffdiag_corr(); break; + case 12: R = measure_Hoffdiag_int1(); break; + case 13: R = measure_Hoffdiag_int2(); break; + case 14: R = measure_Hoffdiag_int3(); break; + } + return R; +} + +void measure(){ + double R, sgn; int i; + currWeight = GetWeight(); sgn = currWeight.sgn(); + meanq += q; if(maxq < q) maxq = q; in_bin_sum_sgn += sgn; + if((measurement_step+1) % bin_length == 0){ + in_bin_sum_sgn /= bin_length; bin_mean_sgn[measurement_step/bin_length] = in_bin_sum_sgn; in_bin_sum_sgn = 0; + } + for(i=0;i +#include +#include +#include +#include +#include +#include +#include + +using namespace std; + +#define dbitset vector + +int no_qubit = 0; // number of qubits is a global variable! 
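// ---- Editorial illustration (not part of the original patch) ----
// The helpers below work over GF(2): every off-diagonal permutation operator is
// stored as a bit vector marking the qubits on which its X/Y factors act, and a
// "fundamental cycle" is a subset of operators whose bit vectors XOR to zero,
// i.e. whose product is a purely diagonal operator. A minimal worked example,
// with three operators on two qubits chosen purely for illustration:
//
//   P1 = X_1      -> (1,0)
//   P2 = X_2      -> (0,1)
//   P3 = X_1 X_2  -> (1,1)
//
// P1 XOR P2 XOR P3 = (0,0), so {P1, P2, P3} form a cycle, and the mod-2
// nullspace basis computed by Null2() below contains the indicator vector
// (1,1,1); cycle_minimize() then tries to shorten such basis vectors by
// XOR-ing them with one another.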
+ +// ***************************** Functions ******************************************* // +template +void printMatrix(const vector>& matrix) { + int m = matrix.size(); + int n = matrix[0].size(); + + for (int i = 0; i < m; i++) { + for (int j = 0; j < n; j++) { + cout << matrix[i][j] << " "; + } + cout << endl; + } +} + +// This function is for binary addition (XOR) +vector GF2_add(vector vec1 , vector vec2){ + vector result; + if (vec1.size() != vec2.size()){ + cerr << "The size of the added vectors are differen!" << endl; + } + + for(int i = 0; i < vec1.size(); i++){ + result.push_back((vec1[i] + vec2[i])%2); + } + return result; +} + +// Function to compute the mod 2 nullspace +vector> Null2(const vector>& matrix) { + vector> nullspaceBasis; + int numRows = matrix.size(); int numCols; + if(numRows==0) return nullspaceBasis; else numCols = matrix[0].size(); + vector> matrix_RE = matrix; // Copy the original matrix + vector nullspaceVector(numCols, 0); + vector marked_rows; + + // Gaussian Elimination (GF(2)) to obtain matrix_GE + for(int j = 0; j < numCols; j++){ + for(int i=0; i < numRows; i++){ + if(matrix_RE[i][j] == 1){ + marked_rows.push_back(i); + for(int k = 0; k < numCols; k++ ){ + if(k != j){ + if(matrix_RE[i][k] == 1){ + // Adding column j to k! + for(int l = 0; l < matrix_RE.size(); l++){ + matrix_RE[l][k] = matrix_RE[l][j] ^ matrix_RE[l][k]; + } + } + } + } + break; + } + } + } + // sort the marked row + std::sort(marked_rows.begin(), marked_rows.end()); + + // Remove consecutive duplicates (taking only unique marked rows!) + auto it = unique(marked_rows.begin(), marked_rows.end()); + marked_rows.erase(it, marked_rows.end()); + + vector> marked_matrix; + // Make the marked matrix: + for(int i = 0 ; i < marked_rows.size() ; i++){ + marked_matrix.push_back(matrix_RE[marked_rows[i]]); + } + + for(int i = 0; i < numRows; i++){ + auto iter = find(marked_rows.begin(), marked_rows.end(), i); + if(iter == marked_rows.end()){ // Finding unmarked (independent) rows + // Step 1 - Then find the ones in that row! 
+ // 2 - Go over each one found in that row and find the corresponding + // row which has that that index of one in that column + + // Step 1 : + vector row_i = matrix_RE[i] , nullvector_j(numRows, 0); + nullvector_j[i] = 1; + for(int j = 0; j < numCols; j++){ + if(row_i[j] == 1){ + // Go over the marked rows and extract the null space: + for(int k = 0; k < marked_matrix.size(); k++){ + if(marked_matrix[k][j] == 1){ + nullvector_j[marked_rows[k]] = 1; + } + } + } + } + nullspaceBasis.push_back(nullvector_j); + } + } + return nullspaceBasis; +} + +int Int_sum(vector vec){ + int sum = 0; + for(int i = 0; i < vec.size(); i++){ + sum += vec[i]; + } + return sum; +} + +// This function minimizes the size of the cycles (nullspace basis vectors with least 1s): +bool compare_null(const vector & a, const vector & b){ + return Int_sum(a) < Int_sum(b); +} + +int cycle_minimize(vector>& null_eigs){ + int nullsize, k, m, null_k, changes_made = 0; vector curr; + nullsize = null_eigs.size(); + sort(null_eigs.begin(), null_eigs.end(), compare_null); + for(k = nullsize-1; k > 0 ; k--){ + null_k = Int_sum(null_eigs[k]); + for(m = 0 ; m < k; m++){ + curr = GF2_add( null_eigs[k] , null_eigs[m]); + if(Int_sum(curr) < null_k){ + null_eigs[k] = curr; changes_made = 1; + break; + } + } + } + return changes_made; +} + +// This function minimizes the size of the cycles (nullspace basis vectors with least 1s): +// This Eig_minimize is specifically designed for operator permutations, and functions differently +// than the one in Null_Generator, which is used to find closed cycles of minimum length 3. +int Eig_minimize(vector>& nullEigs){ + int changes_made = 0; + int nullsize = nullEigs.size() , noops = nullEigs[0].size(); //noops is the number of operators + + vector nulln(nullsize); + for(int i = 0; i < nullsize; i++){ + nulln[i] = Int_sum(nullEigs[i]); + } + auto mincyc = min_element(nulln.begin(), nulln.end()); + + vector nullEigsMinind , nullEigsHighind, nullEigsOnesind; // the indices of eigenvectors ending with 1 + + for(int i = 0; i < nullsize; i++){ + if(nulln[i] == *mincyc){ + nullEigsMinind.push_back(i); + } + else if(nullEigs[i][noops-1] == 1){ + nullEigsOnesind.push_back(i); + } + else{ + nullEigsHighind.push_back(i); + } + } + int highsize = nullEigsHighind.size(); + int minsize = nullEigsMinind.size(); + + for(int i=0; i < minsize; i++){ + int vecisum = Int_sum(nullEigs[nullEigsMinind[i]]); + vector bestVec = nullEigs[nullEigsMinind[i]]; + for(int j=0; j < minsize; j++){ + if(j!= i){ + vector tryVecij = GF2_add(nullEigs[nullEigsMinind[i]] , nullEigs[nullEigsMinind[j]]); + int tryvecijsum = Int_sum(tryVecij); + if(tryvecijsum < vecisum){ + vecisum = tryvecijsum; + bestVec = tryVecij; + } + } + } + if(nullEigs[nullEigsMinind[i]] != bestVec){ nullEigs[nullEigsMinind[i]] = bestVec; changes_made = 1;} + } + + for(int k = 0; k < nullEigsOnesind.size(); k++){ + int oneskind = nullEigsOnesind[k]; + vector highEigk = nullEigs[oneskind]; + vector bestEigk = highEigk; + int bestnullk = Int_sum(highEigk); + for(int l = 0; l < nullsize; l++){ + if(l != oneskind){ + vector highTomin = GF2_add(highEigk , nullEigs[l]); + int lowk = Int_sum(highTomin); + if(lowk < 3){ + bestEigk = highTomin; + bestnullk = lowk; + break; + } + else if(lowk < bestnullk){ + bestEigk = highTomin; + bestnullk = lowk; + } + } + } + if(nullEigs[oneskind] != bestEigk){ nullEigs[oneskind] = bestEigk; changes_made = 1;} + } + return changes_made; +} + +// This function converts the array of integers into a corresponding binary string (used to 
convert the indices of Z into string of bitsets) +string int_to_str(vector Z){ + string Z_string = ""; + if(Z.size() < 1){ + return "0"; + } + std::sort(Z.begin() , Z.end()); + int Z_size = Z.size(); + int count = 1, ind_z_count = 0, max_z = Z[Z_size-1]; + + while(ind_z_count < Z_size){ + if(Z[ind_z_count] < count){ + cout << endl << "Error: repeating spin indices are detected in a Pauli string" << endl; exit(1); + } else if(Z[ind_z_count] == count){ + Z_string = "1" + Z_string; + ind_z_count++; + } + else{ + Z_string = "0" + Z_string; + } + count++; + } + return Z_string; +} + +// This function extracts the input file information into a vector of pairs! +vector, vector>> data_extract(const string& fileName){ + vector, vector>> data; + + ifstream inputFile(fileName); + if (!inputFile) { + cout << "Failed to open the input file!" << endl; + return data; + } + + string line; + while (getline(inputFile, line)) { + // The first non-empty element is the coefficient + istringstream iss(line); + pair,vector> linedata; + + // Extracting the complex coefficient: + double realpart, imagpart=0; + char sign; + string complexPart; + iss >> complexPart; + + istringstream complexIss(complexPart); + complexIss >> realpart >> imagpart; + linedata.first = complex (realpart , imagpart); + + if(linedata.first == 0.0) continue; // if the coefficient is equal to zero, the line is ignored + + //Extracting the integer vectors of qubits and paulis: + string token; + vector integers; + while (iss >> token){ + // int num = std::stoi(token); + int num = token == "X" || token == "x" ? 1 : + token == "Y" || token == "y" ? 2 : + token == "Z" || token == "z" ? 3 : std::stoi(token); + integers.push_back(num); + } + linedata.second = integers; + + data.push_back(linedata); + } + inputFile.close(); + return data; +} + +// Finding bitset in a vector of bitsets: +// The max bitset will be 64 (the maximum number of qubits), and we will cut off accordingly +// ..... when the number of qubits of the system is less than this number. +pair bit_is_in_set(dbitset bitstring, vector bitsetVector) { + bool found = false; + int found_indx = 0, ind_count = 0; + pair output; + for (const auto& bitset : bitsetVector) { + if (bitset == bitstring) { + found = true; + found_indx = ind_count; + break; + } + ind_count++; + } + output.first = found; + output.second = found_indx; + return output; +} + +// This function downsizes the vector of bitsets from dbitset to bitset to avoid +// ..... having redundant zeros. 
+vector> downsize_bitset(vector bitsetVector){ + extern int no_qubit; + vector> bitset_down; + for(const auto& bitset : bitsetVector){ + vector bitset_vector; + for(int i = 0; i < no_qubit; i++){ + bitset_vector.push_back(i < bitset.size() && bitset[i]); + } + reverse(bitset_vector.begin() , bitset_vector.end()) +; bitset_down.push_back(bitset_vector); + } + return bitset_down; +} + +vector> bit_to_intvec(vector> bitsetVec){ + extern int no_qubit; + vector> bit_int; + for(auto const& bitset : bitsetVec){ + vector bit_int_i; + for(int i=0; i < no_qubit; i++){ + bit_int_i.push_back(i < bitset.size() && bitset[i]); + } + reverse(bit_int_i.begin() , bit_int_i.end()); + bit_int.push_back(bit_int_i); + } + + return bit_int; +} + +// ------------- Functions to compute the Zs and Ps and coefficients ---------- +struct BitsetComparator { + bool operator()(const dbitset& lhs, const dbitset& rhs) const { + int i = lhs.size()-1, j = rhs.size()-1, ans = 0; + while(i >= 0 && lhs[i] == 0) i--; + while(j >= 0 && rhs[j] == 0) j--; + if(i < j) ans = 1; else if(i == j){ + while(i >= 0 && lhs[i] == rhs[i]) i--; + if(i >= 0 && lhs[i] < rhs[i]) ans = 1; + } + return ans; + // return lhs.to_ulong() < rhs.to_ulong(); + } +}; // This struct is for bitset comparison (in order to sort the bitsets) + +typedef vector> Coeffs; +typedef vector> ZVecs; +struct PZdata { + vector Ps; + Coeffs coeffs; + ZVecs Zs; + ZVecs Z_track; +}; + +PZdata PZcomp(const vector,vector>>& data) { + PZdata PZ_data; + int l = data.size(),z_count = 0; + extern int no_qubit; + vector Ps; + Coeffs coeffs; + ZVecs Zs; + ZVecs Z_track; //This vector maps the Zs to Ps: it is a many to one mapping! + + for (int i = 0; i coeff_i = data[i].first; + vector zs_i; // For every line zs extracts the qubits on which a pauli Z acts! + vector data_i = data[i].second; // Extracts the array of qubits and paulis for every line of input! + dbitset bit_num; // This variable keeps track of the index of the permutation matrix we get for each line of data! + + for (size_t j = 0; j < data_i.size() / 2; j++) { + // Format of the input file: The 1st, 3rd, 5th, ... indicate the qubits + int qubit = data_i[2 * j]; + // Format of the input file: The 2nd, 4th, 6th, ... indicate the paulis + int pauli_j = data_i[2 * j + 1]; + + if (qubit <= 0){ + cout << endl << "Error: " << qubit << " is incorrect spin index. Spin indices must be positive integers." << endl; exit(1); + } + if (qubit > no_qubit) no_qubit = qubit; + if (pauli_j == 1) { + if(qubit > bit_num.size()) bit_num.resize(qubit); + bit_num[qubit-1] = 1; + // bit_num.set(qubit-1, true); + // Add a 1 in the num-th position from the right of the bit string + } else if (pauli_j == 2) { + if(qubit > bit_num.size()) bit_num.resize(qubit); + //cpp_int perm_term = 1 << (qubit - 1); + //num += perm_term; + bit_num[qubit-1] = 1; + // bit_num.set(qubit-1, true); + coeff_i *= complex(0, 1); + zs_i.push_back(qubit); + } else if (pauli_j == 3) { + zs_i.push_back(qubit); + } + } + coeffs.push_back(coeff_i); + Zs.push_back(zs_i); // If zs is empty then no non-trivial diagonal components! (All identity operators) + + // Look for num in the previous list of Ps (permutations) + pair bit_in_set = bit_is_in_set(bit_num , Ps); + if(!bit_in_set.first){ + // num was not found in Ps, thus a new permutation matrix! + Ps.push_back(bit_num); + // We will add i-th element of coeffs and Zs to be associated with the current Ps! + Z_track.push_back(vector {i}); + }else { + // num was found in Ps, and P_index will be the index of Ps that matched num. 
+ int P_index = bit_in_set.second; + + vector z_indices = Z_track[P_index]; + + bool z_found = false; + for (int k = 0; k < z_indices.size(); k++){ + if (Zs[z_indices[k]] == zs_i){ + coeffs[P_index] += coeff_i; + z_found = true; + break; + } + } + // If the z array is new, we add it to the Z_track for the Ps associated with num! + if (!z_found){ + Z_track[P_index].push_back(i); + } + } + } + + // Throw away the zero coefficients: + vector Ps_kept; + ZVecs Z_track_kept; + + for(int k = 0; k < Z_track.size(); k++){ + vector ztrack_k; + for(int l = 0; l < Z_track[k].size(); l++){ + int k_l_index = Z_track[k][l]; + complex coeff_k_l = coeffs[k_l_index]; + if (abs(coeff_k_l) > 1e-8){ + //zero_coeffs_for_k.push_back(l); + ztrack_k.push_back(k_l_index); + } + } + if(ztrack_k.size() > 0){ + Z_track_kept.push_back(ztrack_k); + Ps_kept.push_back(Ps[k]); + } + } + + // Sorting everything based on indices of Ps: + vector indices; + for(int i = 0; i < Ps.size(); i++){ + indices.push_back(i); + } + sort(indices.begin(), indices.end(), [&](size_t a, size_t b) { + return BitsetComparator()(Ps_kept[a], Ps_kept[b]); + }); + + + vector Ps_sorted; + ZVecs Z_track_sorted; + for(int i = 0; i < Ps_kept.size(); i++){ + Ps_sorted.push_back(Ps_kept[indices[i]]); + Z_track_sorted.push_back(Z_track_kept[indices[i]]); + } + + PZ_data.coeffs = coeffs; //Coeffs and Zs are kept as the original data + PZ_data.Ps = Ps_sorted; + PZ_data.Zs = Zs; + PZ_data.Z_track = Z_track_sorted; + + return PZ_data; +} + +int none(dbitset v){ + return std::all_of(v.begin(), v.end(), [](int i) { return i==0; }); +} + +ofstream output; + +void output_complex(complex z){ + if(abs(z.imag()) < 1E-8) output << z.real(); else output << "{" << z.real() << "," << z.imag() << "}"; +} + +void main1(int argc , char* argv[]){ + string fileName(argv[1]); // Reading the name of the input .txt file describing the Hamiltonian + vector, vector>> data = data_extract(fileName); + + // Unpacking the data from the input file "fileName" + PZdata PZ_data = PZcomp(data); + vector Ps_bit = PZ_data.Ps; + vector> Ps = downsize_bitset(Ps_bit); + vector> coefficients = PZ_data.coeffs; + vector> Z_track = PZ_data.Z_track; + vector> Zs = PZ_data.Zs; + vector Zs_string; + bool D0_exists = false; + //int no_qubit = PZ_data.no_qubit; + + // Converting the Z indices into string of bitsets! 
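The duplicate-merging and zero-coefficient pruning performed above can be summarized compactly; the following Python sketch (illustrative only, not part of the repository) accumulates coefficients per (permutation, Z-support) pair and discards entries whose merged coefficient falls below the 1e-8 threshold used above.

# Illustrative sketch: merge terms with identical (permutation, Z-support) and drop
# merged coefficients below the 1e-8 threshold.
from collections import defaultdict

def group_and_prune(terms, tol=1e-8):
    """terms is an iterable of (coeff, perm_bits, z_support) tuples, one per input line."""
    acc = defaultdict(complex)
    for coeff, perm, zs in terms:
        acc[(perm, tuple(zs))] += coeff
    return {key: c for key, c in acc.items() if abs(c) > tol}

terms = [(0.5, frozenset({1}), [3]), (-0.5, frozenset({1}), [3]), (0.25, frozenset({2}), [])]
print(group_and_prune(terms))   # the first two terms cancel exactly and are discarded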
+ for(int i = 0; i < Zs.size();i++){ + Zs_string.push_back(int_to_str(Zs[i])); + } + + // Convert Ps indices into vector of ints + vector> Ps_nontrivial = Ps; + if(none(Ps_bit[0])){ + Ps_nontrivial.erase(Ps_nontrivial.begin()); + D0_exists = true; + } + + // Minimizing the size of the fundamental cycless + vector> Ps_binary = bit_to_intvec(Ps_nontrivial); + vector> nullspace = Null2(Ps_binary); + while(cycle_minimize(nullspace)); + int no_ps = Ps_binary.size(); + int nullity = nullspace.size(); + + //string output_h = fileName.substr(0, fileName.find_last_of(".")) + ".h"; + // string output_h = "hamiltonian.hpp"; + // ofstream output(output_h); + + // ********************************************************************** // + // -------------------- Creating the the .h file ------------------------ // + if(output.is_open()){ + int actually_complex; + output << "#define N " << no_qubit << endl; + output << "#define Nop " << no_ps << endl; + output << "#define Ncycles " << nullity << endl; + output << endl; + + // ---------------- Permutation matrices and cycles --------------------- // + // The permutation bitsets + output << "std::bitset P_matrix[Nop] = {"; + for(int i = 0; i < no_ps; i++){ + output << "std::bitset(\""; + for(int j=0; j < Ps_nontrivial[i].size(); j++){ + output << Ps_nontrivial[i][j]; + } + output << "\")"; + if(i < no_ps - 1){ + output << ", "; + } + } + output << "};" << endl; + + // The cycle bitsets + output << "std::bitset cycles[Ncycles] = {"; + for(int i = 0; i < nullity; i++){ + output << "std::bitset(\""; + for(int j=no_ps-1; j >= 0; j--){ + output << nullspace[i][j]; + } + output << "\")"; + if(i < nullity-1){ + output << ", "; + } + } + output << "};" << endl; + output << endl; + + // ---------------------------------------------------------------------- // + // -------------------------- Diagonal terms ---------------------------- // + if(D0_exists){ + output << "const int D0_size = " << Z_track[0].size() << ";" << endl; + + actually_complex = 0; + for(int i=0;i "; else output << "double "; + output << "D0_coeff[D0_size] = {"; + for(int i=0;i D0_product[D0_size] = {"; + for(int i = 0; i < Z_track[0].size(); i++){ + vector Zs_i = Zs[Z_track[0][i]]; + output << "std::bitset(\"" << int_to_str(Zs_i) << "\")"; + if(i < Z_track[0].size() - 1){ + output << ", "; + } + } + output << "};" << endl; + } + else{ + output << "const int D0_size = 0;" << endl; + output << "double D0_coeff[D0_size];" << endl; + output << "std::bitset D0_product[D0_size];" << endl; + } + + output << endl; + + // ------------------------------------------------------------------------ // + // --------------------------- Off-Diagonal terms ------------------------- // + // Finding D_maxsize: + int D_max = 0 , D_start; + vector D_size; + if(Ps_nontrivial.size() == Ps.size()) // In case of no diagonal term (i.e. 
D_0 = 0) + { + D_start = 0; + }else{ + D_start = 1; + } + for(int i = D_start; i < Z_track.size(); i++){ + int z_size_i = Z_track[i].size(); + D_size.push_back(z_size_i); + if(z_size_i > D_max){ + D_max = z_size_i; + } + } + output << "const int D_maxsize = " << D_max << ";" << endl; + output << "int D_size[Nop] = {"; + for(int i=0;i "; else output << "double "; + output << "D_coeff[Nop][D_maxsize] = {"; + for(int i = D_start; i < Z_track.size() ; i++){ + output << "{"; + for(int j = 0; j < Z_track[i].size(); j++){ + output_complex(coefficients[Z_track[i][j]]); + if(j < Z_track[i].size() - 1) output << ", "; + } + output << "}"; + if(i < Z_track.size()-1){ + output << ", "; + } + } + output << "};" << endl; + output << "std::bitset D_product[Nop][D_maxsize] = {"; + for(int i = D_start; i < Z_track.size() ; i++){ + output << "{"; + for(int j = 0; j < Z_track[i].size(); j++){ + vector z_ij = Zs[Z_track[i][j]]; + output << "std::bitset(\"" << int_to_str(z_ij) << "\")"; + if(j < Z_track[i].size() - 1){ + output << ", "; + } + } + output << "}"; + if(i < Z_track.size()-1){ + output << ", "; + } + } + output << "};" << endl; + + if(no_ps == 0) output << endl << "#pragma GCC diagnostic ignored \"-Wdiv-by-zero\"" << endl; + + // output.close(); + } +} + +template +void PrintListPure(ofstream& output, vector list, int len){ + output << "{"; + for(int i=0;i +void PrintList(ofstream& output, vector list, int len, string namelist){ + output << namelist << " = "; + PrintListPure(output, list, len); + output << ";" << endl; +} + +void main2(int argc , char* argv[]){ + //int Nobservables = 2*(argc - 2); + int Nobservables = argc - 2; + vector, vector>>> Op_data(Nobservables); + + string Ham_fileName(argv[1]); // Reading the name of the input .txt file describing the Hamiltonian + vector, vector>> Ham_data = data_extract(Ham_fileName); + + vector Op_fileNames(Nobservables); + for(int O=0;O Ps_bit_Ham = PZ_data_Ham.Ps; + vector> Ps_Ham = downsize_bitset(Ps_bit_Ham); + vector> coefficients_Ham = PZ_data_Ham.coeffs; + vector> Z_track_Ham = PZ_data_Ham.Z_track; + vector> Zs_Ham = PZ_data_Ham.Zs; + vector Zs_string_Ham; + + // Unpacking the Operator data from the input files + vector PZ_data_Op(Nobservables); + vector> Ps_bit_Op(Nobservables); + vector>> Ps_Op(Nobservables); + vector>> coefficients_Op(Nobservables); + vector>> Z_track_Op(Nobservables); + vector>> Zs_Op(Nobservables); + vector> Zs_string_Op(Nobservables); + bool D0_exists = false; + vector P0_exists; P0_exists.resize(Nobservables, false); + + for(int O=0;O> Ps_Ham_nontrivial = Ps_Ham; + if(none(Ps_bit_Ham[0])){ + Ps_Ham_nontrivial.erase(Ps_Ham_nontrivial.begin()); + D0_exists = true; + } + + // Convert Ps indices into vector of bools + vector>> Ps_Op_nontrivial(Nobservables); + for(int O=0;O> Ps_binary_Ham = bit_to_intvec(Ps_Ham_nontrivial); + vector>> Ps_binary_Op(Nobservables); + for(int O=0;O no_ops(Nobservables); + for(int O=0;O>> Op_to_Ham(Nobservables); + vector zero_vector; + for(int i = 0; i < no_ps; i++){ + zero_vector.push_back(0); + } + + for(int O=0;O> Ps_bin_total = Ps_binary_Ham, nullspace_k; + bool permutation_found = false; + Ps_bin_total.push_back(Ps_binary_Op[O][k]); + nullspace_k = Null2(Ps_bin_total); + while(Eig_minimize(nullspace_k)); + int min_ops=100000 , min_index; + for(int i = 0; i< nullspace_k.size(); i++){ + int sum_ops = 0; + if(nullspace_k[i][no_ps] == 1){ + permutation_found = true; + sum_ops = Int_sum(nullspace_k[i]); + if(sum_ops < min_ops){ + min_ops = sum_ops; + min_index = i; + } + } + } + 
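The step above asks whether an observable's permutation operator can be written as a product of the Hamiltonian's permutation operators; since every operator is encoded as a bit mask, this is equivalent to asking whether the observable's mask is the XOR of a subset of Hamiltonian masks. The production code answers this through the GF(2) nullspace routines (Null2, Eig_minimize); the brute-force Python sketch below (illustrative only, fine for a handful of operators) conveys the same criterion together with the preference for the smallest such subset.

# Illustrative brute-force version of the search done via the GF(2) nullspace above:
# find the smallest subset of Hamiltonian permutation masks whose XOR equals the
# observable's permutation mask.
from itertools import combinations
from functools import reduce

def express_as_product(ham_masks, target_mask):
    for r in range(1, len(ham_masks) + 1):
        for subset in combinations(range(len(ham_masks)), r):
            if reduce(lambda acc, i: acc ^ ham_masks[i], subset, 0) == target_mask:
                return subset           # indices of the Hamiltonian operators to multiply
    return None                         # the observable permutation is not reachable

# P_0 flips qubits {1,2} (0b011), P_1 flips {2,3} (0b110); their product flips {1,3} (0b101).
print(express_as_product([0b011, 0b110], 0b101))   # -> (0, 1)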
if(permutation_found){ + Op_to_Ham[O].push_back(nullspace_k[min_index]); + } + else{ + Op_to_Ham[O].push_back(zero_vector); + } + } + + // string output_operator = "observables.hpp"; + // ofstream output(output_operator); + + vector> rel_perms(Nobservables); + vector non_triv_offdiags_exists; non_triv_offdiags_exists.resize(Nobservables,false); + + for(int O=0;O 0){ + rel_perms[O].push_back(i); + } + } + // Delete the Z_tracks of the trivial permutations: + vector>> Z_track_Op_kept(Nobservables); + + for(int O=0;O MP[Nobservables][MNop_max] = {"; + + for(int O=0; O(\""; + for(int j=no_ps-1; j >= 0; j--){ + output << Op_to_Ham[O][rel_perms[O][i]][j]; + } + output << "\")"; + if(i < no_ops[O]-1){ + output << ", "; + } + } + output << "}"; + if(O MD0_size(Nobservables); for(int O=0; O "; else output << "double "; + output << "MD0_coeff[Nobservables][MD0_maxsize] = {"; + + for(int O=0; O MD0_product[Nobservables][MD0_maxsize] = {"; + + for(int O=0; O(\"" << Zs_string_Op[O][Z_track_Op[O][0][i]] << "\")"; + if(i < Z_track_Op[O][0].size() - 1){ + output << ", "; + } + } + output << "}"; if(O> MD_size; MD_size.resize(Nobservables,vector(no_ops_max)); + vector MD_maxsize; MD_maxsize.resize(Nobservables,0); + + for(int O=0; O D_size; + for(int i = 0; i < no_ops[O]; i++){ + int z_size_i = Z_track_Op_kept[O][i].size(); + D_size.push_back(z_size_i); + if(z_size_i > D_max){ + D_max = z_size_i; + } + } + MD_maxsize[O] = D_max; + for(int i=0;i "; else output << "double "; + output << "MD_coeff[Nobservables][MNop_max][MD_Maxsize] = {"; + + for(int O=0; O MD_product[Nobservables][MNop_max][MD_Maxsize] = {"; + for(int O=0; O(\"" << Zs_string_Op[O][Z_track_Op_kept[O][i][j]] << "\")"; + if(j < Z_track_Op_kept[O][i].size() - 1){ + output << ", "; + } + } + output << "}"; + if(i < Z_track_Op_kept[O].size()-1){ + output << ", "; + } + } + output << "}"; if(O= 3){ + cout << "Preparing PMR for the observables and their representation via the permutation operators of the Hamiltonian..."; + main2(argc, argv); + cout << "done" << endl; + } + output.close(); + return 0; +} diff --git a/utils/_pauli_manipulations_test.py b/utils/_pauli_manipulations_test.py new file mode 100644 index 0000000..b4b0d92 --- /dev/null +++ b/utils/_pauli_manipulations_test.py @@ -0,0 +1,309 @@ +""" +// +// This program contains unit tests for the pauli_manipulations.py file to support code used in: +// Nic Ezzell, Lev Barash, Itay Hen, Exact and universal quantum Monte Carlo estimators for energy susceptibility and fidelity susceptibility. 
+// +// +""" +# %% +import unittest +import numpy as np +from pauli_manipulations import PauliTerm, PauliH, PauliU + +class PauliTermTest(unittest.TestCase): + """ Unit test cases for PauliTerm """ + + ################################################## + # Init, str, equals, to_pmr_line + ################################################## + def test_init_and_str(self): + x1 = PauliTerm(1.0, [1], ['X'], 1) + self.assertEqual("X", x1.__str__()) + y2 = PauliTerm(-0.3*1j, [2], ['Y'], 3) + self.assertEqual("(-0-0.3j)*IYI", y2.__str__()) + xyz = PauliTerm(0.1-0.3*1j, [1,3,4], ['X','Y','Z'], 6) + self.assertEqual("(0.1-0.3j)*XIYZII", xyz.__str__()) + + def test_equals(self): + # basic 1q tests + op1 = PauliTerm(1.0, [1], ['X'], 1) + op2 = PauliTerm(1.0, [1], ['X'], 1) + self.assertEqual(op1 == op2, True) + op2 = PauliTerm(0.9, [1], ['X'], 1) + self.assertEqual(op1 == op2, False) + op2 = PauliTerm(1.0, [1], ['X'], 2) + self.assertEqual(op1 == op2, False) + # 3q tests + op1 = PauliTerm(1.0, [1, 2], ['X', 'Y'], 3) + op2 = PauliTerm(1.0, [1, 2], ['X', 'Y'], 3) + self.assertEqual(op1 == op2, True) + op2 = PauliTerm(1.0, [1, 3], ['X', 'Y'], 3) + self.assertEqual(op1 == op2, False) + op2 = PauliTerm(1.0, [1, 2], ['X', 'Z'], 3) + self.assertEqual(op1 == op2, False) + op2 = PauliTerm(1.0, [1, 2], ['X', 'Y'], 2) + self.assertEqual(op1 == op2, False) + + def test_to_pmr_str(self): + op = PauliTerm(0.3, [1,3,4], ['X','Y','Z'], 6) + pmr_str = "0.300000 1 X 3 Y 4 Z" + self.assertEqual(pmr_str, op.to_pmr_line()) + + ################################################## + # Operations + ################################################## + def test_mult(self): + op = PauliTerm(0.3, [1,3,4], ['X','Y','Z'], 6) + op = 1.73 * op + self.assertAlmostEqual(op.c, 1.73 * 0.3) + op = PauliTerm(-0.6, [1,3,4], ['X','Y','Z'], 6) + op = 3.14*1j * op + self.assertAlmostEqual(op.c, 3.14*1j * -0.6) + + def test_dagger(self): + op = PauliTerm(0.3*1j, [1,3,4], ['X','Y','Z'], 6) + conj = op.get_dagger() + self.assertAlmostEqual(conj.c, np.conj(op.c)) + + def test_bi_conj(self): + op1 = PauliTerm(0.3*1j, [1,3,4], ['X','Y','Z'], 6) + op2 = PauliTerm(1.25, [1, 5], ['Y', 'Z'], 6) + op3 = PauliTerm(-0.1*1j, [2, 3, 5, 6], ['Y', 'Y', 'X', 'Z'], 6) + conj_op = op1.bi_conj(op2, op3) + c = op1.c * op2.c * np.conj(op3.c) + answer = PauliTerm(c, [1,2,4,5,6], ['Z','Y','Z','Y','Z'], 6) + self.assertAlmostEqual(conj_op.c, answer.c) + self.assertEqual(conj_op.supp, answer.supp) + self.assertEqual(conj_op.ops, answer.ops) + # next example + op1 = PauliTerm(0.3*1j, [1,3,4], ['X','Y','Z'], 6) + op2 = PauliTerm(1.25, [1, 5], ['Y', 'Z'], 6) + op3 = PauliTerm(-0.1*1j, [2, 3, 6], ['Y', 'Y', 'Z'], 6) + conj_op = op1.bi_conj(op2, op3) + c = op1.c * op2.c * np.conj(op3.c) + answer = PauliTerm(-1j*c, [1,2,4,5,6], ['Z','Y','Z','Z','Z'], 6) + self.assertAlmostEqual(conj_op.c, answer.c) + self.assertEqual(conj_op.supp, answer.supp) + self.assertEqual(conj_op.ops, answer.ops) + +class PauliHTest(unittest.TestCase): + """ Unit test cases for PauliH """ + + ################################################## + # Test init, __str__, and __contains__ + ################################################## + def test_init_and_str(self): + # build H = X1 + \lam Z1 model + lam = 0.25 + x1 = PauliTerm(1.0, [1], ['X'], 3) + z1 = PauliTerm(1.0, [1], ['Z'], 3) + h = PauliH(3, [x1, lam * z1]) + h_str = "XII + 0.25*ZII" + self.assertEqual(h_str, h.__str__()) + # build 2q prl model + z1z2 = PauliTerm(1.0, [1, 2], ['Z','Z'], 2) + x1 = PauliTerm(1.0, [1], ['X'], 2) + x2 = 
PauliTerm(1.0, [2], ['X'], 2) + h0 = PauliH(2, [z1z2, 0.1 * x1, 0.1 * x2]) + h0_str = "ZZ + 0.1*XI + 0.1*IX" + self.assertEqual(h0_str, h0.__str__()) + z1 = PauliTerm(1.0, [1], ['Z'], 2) + z2 = PauliTerm(1.0, [2], ['Z'], 2) + h1 = PauliH(2, [z1, z2]) + h1_str = "ZI + IZ" + self.assertEqual(h1_str, h1.__str__()) + h = h0 + 0.9 * h1 + h_str = h0_str + " + 0.9*ZI + 0.9*IZ" + self.assertEqual(h_str, h.__str__()) + + def test_pmr_str(self): + # build 2q prl model + z1z2 = PauliTerm(1.0, [1, 2], ['Z','Z'], 2) + x1 = PauliTerm(1.0, [1], ['X'], 2) + x2 = PauliTerm(1.0, [2], ['X'], 2) + h0 = PauliH(2, [z1z2, 0.1 * x1, 0.1 * x2]) + z1 = PauliTerm(1.0, [1], ['Z'], 2) + z2 = PauliTerm(1.0, [2], ['Z'], 2) + h1 = PauliH(2, [z1, z2]) + h = h0 + 0.9 * h1 + pmr_str = "1.000000 1 Z 2 Z\n0.100000 1 X\n0.100000 2 X\n0.900000 1 Z\n0.900000 2 Z" + self.assertEqual(pmr_str, h.to_pmr_str()) + + def test_contains(self): + z1 = PauliTerm(1.0, [1], ['Z'], 2) + z2 = PauliTerm(1.0, [2], ['Z'], 2) + hz = PauliH(2, [z1, z2]) + self.assertEqual(z1 in hz, True) + self.assertEqual(z2 in hz, True) + op = PauliTerm(0.9, [1], ['Z'], 2) + self.assertEqual(op in hz, False) + x1 = PauliTerm(1.0, [1], ['X'], 2) + self.assertEqual(x1 in hz, False) + x2 = PauliTerm(1.0, [2], ['X'], 2) + hx = PauliH(2, [x1, x2]) + self.assertEqual(x1 in hx, True) + self.assertEqual(x2 in hx, True) + xx = PauliTerm(1.0, [1,2], ['X', 'X'], 2) + self.assertEqual(xx in hx, False) + + ################################################## + # Test add_term and simplify + ################################################## + def test_add_term_and_simplify(self): + # first suppress simplify with addition + h = PauliH(3) + x1 = PauliTerm(1.0, [1], ['X'], 3) + h.add_term(x1) + self.assertEqual(x1 in h, True) + h.add_term(x1, simplify=False) + self.assertEqual(x1 in h, True) + h.simplify() + self.assertEqual(x1 in h, False) + self.assertEqual(2 * x1 in h, True) + # now check that simplification occurs with init + h = PauliH(3, [x1, x1]) + self.assertEqual(x1 in h, False) + self.assertEqual(2 * x1 in h, True) + # now check that simplification works with add + h = PauliH(3) + h.add_term(x1) + h.add_term(x1) + self.assertEqual(x1 in h, False) + self.assertEqual(2 * x1 in h, True) + # check simplification with imaginary + op1 = PauliTerm(0.25+0.1*1j, [1, 3], ['X', 'Y'], 3) + op2 = PauliTerm(0.3-0.1*1j, [1, 3], ['X', 'Y'], 3) + op3 = PauliTerm(0.25, [2, 3], ['Z', 'Z'], 3) + h.add_term(op1, simplify=False) + h.add_term(op2, False) + h.add_term(op3, False) + h.simplify() + self.assertEqual(op3 in h, True) + self.assertEqual(op1 in h, False) + self.assertEqual(op1 in h, False) + op = PauliTerm(0.25+0.3, [1, 3], ['X', 'Y'], 3) + self.assertEqual(op in h, True) + +class PauliUTest(unittest.TestCase): + """ Unit test cases for PauliU """ + + ################################################## + # Test init, __str__, and __contains__ + ################################################## + def test_init_str_contains(self): + # default should have identity operator + u = PauliU(3) + id = PauliTerm(1.0, [], [], 3) + self.assertEqual(id in u, True) + self.assertEqual("III", u.__str__()) + # add terms that anti-commute + c = np.random.normal(0, 1, size=3) + c /= np.linalg.norm(c) + op1 = PauliTerm(c[0], [1,2,3], 3*['X'], 3) + u.add_term(op1) + op2 = PauliTerm(c[1], [1,2], ['X', 'Z'], 3) + u.add_term(op2) + op3 = PauliTerm(c[2], [1], ['Y'], 3) + u.add_term(op3) + self.assertEqual(u.validate_normalization(), True) + self.assertEqual(u.validate_anticommutation(), True) + 
self.assertEqual(op1 in u, True) + self.assertEqual(op2 in u, True) + self.assertEqual(op3 in u, True) + + ################################################## + # Test set_as_random and get_inverse + ################################################## + def test_set_as_random(self): + u = PauliU(3) + u.set_as_random(2*3 + 1) + ops = ['XXX', 'XXY', 'XXZ', 'XYI', 'XZI', 'YII', 'ZII'] + str_u = u.__str__() + for op in ops: + self.assertEqual(op in str_u, True) + u.set_as_random(3) + self.assertEqual(u.validate_anticommutation(), True) + self.assertEqual(u.validate_normalization(), True) + str_u = u.__str__() + count = 0 + for op in ops: + if op in str_u: + count += 1 + self.assertEqual(count, 3) + self.assertEqual(u.validate_anticommutation(), True) + self.assertEqual(u.validate_normalization(), True) + + def test_get_inverse(self): + u = PauliU(3) + u.set_as_random(2*3 + 1, eps=np.random.random()) + udg = u.get_inverse() + for pt in udg.terms: + conj_pt = PauliTerm(np.conj(pt.c), pt.supp, pt.ops, pt.n) + self.assertEqual(conj_pt in u, True) + + +class PauliUConjugateH(unittest.TestCase): + """ + Unit test cases for a PauliU object conjugating + a PauliH object. + """ + + def test_specific_rotation(self): + # build 2q prl model + n = 2 + z1z2 = PauliTerm(1.0, [1, 2], ['Z','Z'], n) + x1 = PauliTerm(1.0, [1], ['X'], n) + x2 = PauliTerm(1.0, [2], ['X'], n) + h0 = PauliH(n, [z1z2, 0.1 * x1, 0.1 * x2]) + z1 = PauliTerm(1.0, [1], ['Z'], n) + z2 = PauliTerm(1.0, [2], ['Z'], n) + h1 = PauliH(n, [z1, z2]) + h = h0 + 0.9 * h1 + # rotate it with a hand-crafted unitary + p1 = PauliTerm(0.3, [1,2], ['X', 'Y'], n) + p2 = PauliTerm(np.sqrt(1-0.3**2), [1], ['Z'], n) + u = PauliU(2, [p1, p2]) + uh = h.conjugate(u) + # form rotated h manually + a0 = 0.082 + a1 = 0.738 + a2 = 0.515127 + a3 = 0.0572364 + x1 = PauliTerm(-a0, [1], ['X'], n) + z1 = PauliTerm(a1, [1], ['Z'], n) + x2 = PauliTerm(a0, [2], ['X'], n) + yx = PauliTerm(a2, [1,2], ['Y', 'X'], n) + xy = PauliTerm(a2, [1,2], ['X', 'Y'], n) + zy = PauliTerm(a3, [1,2], ['Z', 'Y'], n) + z2 = PauliTerm(a1, [2], ['Z'], n) + yz = PauliTerm(-a3, [1,2], ['Y', 'Z'], n) + zz = PauliTerm(1.0, [1,2], ['Z', 'Z'], n) + man_h = PauliH(n, [x1, z1, x2, yx, xy, zy, z2, yz, zz]) + self.assertEqual(uh == man_h, True) + + def test_self_inverse(self): + n = 10 + # build 2q prl model + z1z2 = PauliTerm(1.0, [1, 2], ['Z','Z'], n) + x1 = PauliTerm(1.0, [1], ['X'], n) + x2 = PauliTerm(1.0, [2], ['X'], n) + h0 = PauliH(n, [z1z2, 0.1 * x1, 0.1 * x2]) + z1 = PauliTerm(1.0, [1], ['Z'], n) + z2 = PauliTerm(1.0, [2], ['Z'], n) + h1 = PauliH(n, [z1, z2]) + h = h0 + 0.9 * h1 + # generate random u and conjugate h + u = PauliU(n) + for _ in range(100): + l = np.random.choice(range(0, 2*n + 2)) + eps = np.random.random() + u.set_as_random(l, eps) + conj_h = h.conjugate(u) + conj2_h = conj_h.conjugate(u) + self.assertEqual(conj2_h, h) +# %% + +# %% +if __name__ == "__main__": + unittest.main() +#%% \ No newline at end of file diff --git a/utils/exact_calculations.py b/utils/exact_calculations.py new file mode 100644 index 0000000..d2ee40d --- /dev/null +++ b/utils/exact_calculations.py @@ -0,0 +1,165 @@ +""" +// +// This program contains code to do "exact numerical calculations" in support of: +// Nic Ezzell, Lev Barash, Itay Hen, Exact and universal quantum Monte Carlo estimators for energy susceptibility and fidelity susceptibility. 
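The tests above construct the two-qubit PRL model repeatedly and check its PMR-formatted string; as a usage illustration, the same classes can be used to write out a Hamiltonian input file for the QMC preparation step (the output file name below is only an example).

# Illustrative usage of the classes exercised above: build the two-qubit PRL-model
# Hamiltonian and dump it in the line-per-term PMR format (example file name only).
from pauli_manipulations import PauliTerm, PauliH

z1z2 = PauliTerm(1.0, [1, 2], ['Z', 'Z'], 2)
x1 = PauliTerm(1.0, [1], ['X'], 2)
x2 = PauliTerm(1.0, [2], ['X'], 2)
h0 = PauliH(2, [z1z2, 0.1 * x1, 0.1 * x2])
h1 = PauliH(2, [PauliTerm(1.0, [1], ['Z'], 2), PauliTerm(1.0, [2], ['Z'], 2)])
h = h0 + 0.9 * h1
with open("H.txt", "w") as f:          # example output path
    f.write(h.to_pmr_str())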
+// +// +""" +# %% +################################################ +# Functions to compute exact values for PRL +# model: [10.1103/PhysRevLett.100.100501] +# This is Mathematica code converted +# to Python using chatGPT4-o. +################################################ + +import numpy as np +import scipy + +def get_paulis(): + """ + Gets 1 qubit Pauli basis. + """ + id2 = np.array([[1, 0], [0, 1]], dtype=complex) + px = np.array([[0, 1], [1, 0]], dtype=complex) + py = np.array([[0, -1j], [1j, 0]], dtype=complex) + pz = np.array([[1, 0], [0, -1]], dtype=complex) + + return id2, px, py, pz + +def prl_h(Bz): + """ + Returns 2 qubit PRL Hamiltonian. + """ + id2, px, _, pz = get_paulis() + h0 = np.kron(pz, pz) + 0.1 * (np.kron(px, id2) + np.kron(id2, px)) + h1 = np.kron(pz, id2) + np.kron(id2, pz) + h = h0 + Bz * h1 + + return h0, h1, h + +def prl_gs_gtau(Bz, tau): + """ + Returns G(tau) = - ^2 for + = for |psi(Bz)> + the ground-state of PRL model. + """ + # Compute the eigenvalues and eigenvectors of H(Bz) + _, h1, h = prl_h(Bz) + evals, evecs = np.linalg.eig(h) + + # Find the ground state energy and corresponding eigenvector + e0 = np.min(evals) + psi0 = evecs[:, np.argmin(evals)] + + # Compute the correlation term + corr = np.sum([ + np.exp(tau * (e0 - evals[j])) * np.abs(np.dot(np.conj(psi0.T), np.dot(h1, evecs[:, j])))**2 + for j in range(len(evals)) + ]) + + # Compute the average value of H1 in the ground state + avgH1 = np.dot(np.conj(psi0.T), np.dot(h1, psi0)) + + # Compute gTau + gTau = corr - (avgH1)**2 + if gTau.imag < 1e-8: + gTau = gTau.real + + return gTau + +def prl_gs_chiE(Bz): + """ + Integrates prl_gs_gtau over tau from [0, np.inf]. + """ + result, error = scipy.integrate.quad(lambda tau: prl_gs_gtau(Bz, tau), 0, np.inf) + return result, error + +def prl_gs_chiF(Bz): + """ + Integrates tau*prl_gs_gtau over tau from [0, np.inf]. + """ + result, error = scipy.integrate.quad(lambda tau: tau * prl_gs_gtau(Bz, tau), 0, np.inf) + return result, error + +def prl_beta_gtau(Bz, beta, tau): + """ + Returns G(tau) = - ^2 for + = Tr[H_1 rho(Bz, beta)] for rho the + thermal state PRL model at inverse temp beta. + """ + # Compute the eigenvalues and eigenvectors of H(Bz) + _, h1, h = prl_h(Bz) + evals, evecs = np.linalg.eig(h) + + # Compute the partition function Z + z = np.sum(np.exp(-beta * evals)) + + # Compute the correlation term + corr = np.sum([ + np.exp(- (beta - tau) * evals[i]) * np.exp(- tau * evals[j]) * + np.abs(np.dot(np.conj(evecs[:, i].T), np.dot(h1, evecs[:, j])))**2 + for i in range(len(evals)) + for j in range(len(evals)) + ]) / z + + # Compute the average value of H1 + avgH1 = np.sum([ + np.exp(-beta * evals[i]) * np.dot(np.conj(evecs[:, i].T), np.dot(h1, evecs[:, i])) + for i in range(len(evals)) + ]) / z + + # Compute gTau + gTau = corr - (avgH1)**2 + if gTau.imag < 1e-8: + gTau = gTau.real + + return gTau + +def prl_beta_chiE(Bz, beta): + """ + Integrates prl_beta_gtau over tau from [0, beta]. + """ + result, error = scipy.integrate.quad(lambda tau: prl_beta_gtau(Bz, beta, tau), 0, beta) + return result, error + +def prl_beta_chiX(Bz, beta): + """ + Integrates tau*prl_beta_gtau over tau from [0, beta]. + """ + result, error = scipy.integrate.quad(lambda tau: tau * prl_beta_gtau(Bz, beta, tau), 0, beta) + return result, error + +def prl_beta_chiF(Bz, beta): + """ + Integrates tau*prl_beta_gtau over tau from [0, beta/2]. 
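A minimal usage sketch for the ground-state routines defined above (assuming they are in scope, e.g. when run inside or imported from utils/exact_calculations.py): evaluate the exact energy and fidelity susceptibilities of the PRL model at a single field value.

# Minimal usage sketch (assumes the functions above are in scope).
Bz = 0.5
chiE, chiE_err = prl_gs_chiE(Bz)
chiF, chiF_err = prl_gs_chiF(Bz)
print(f"ground-state chiE(Bz={Bz}) = {chiE:.6f}  (quad error {chiE_err:.1e})")
print(f"ground-state chiF(Bz={Bz}) = {chiF:.6f}  (quad error {chiF_err:.1e})")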
+ """ + result, error = scipy.integrate.quad(lambda tau: tau * prl_beta_gtau(Bz, beta, tau), 0, beta/2) + return result, error + +def prl_gs_fidsus(Bz, epsilon=0.0001): + """ + Returns T = 0 fid sus for PRL model. + """ + # Compute the ground state vector for H(Bz) + _, _, h = prl_h(Bz) + _, evecs = scipy.sparse.linalg.eigsh(h, k=1, which='SA') + psi0 = evecs[:, 0] + + # Compute the ground state vector for H(Bz + epsilon) + _, _, heps = prl_h(Bz + epsilon) + _, evecs_eps = scipy.sparse.linalg.eigsh(heps, k=1, which='SA') + psiPeps = evecs_eps[:, 0] + + # Compute dPsi + dPsi = (psiPeps - psi0) / epsilon + + # Compute chi + chi = (np.dot(np.conj(dPsi.T), dPsi) - + np.dot(np.conj(dPsi.T), psi0) * np.dot(np.conj(psi0.T), dPsi)) + if chi.imag < 1e-8: + chi = chi.real + + return chi +# %% +# %% diff --git a/utils/ioscripts.py b/utils/ioscripts.py new file mode 100644 index 0000000..5e7ed03 --- /dev/null +++ b/utils/ioscripts.py @@ -0,0 +1,238 @@ +""" +// +// This program contains code to handle common I/O tasks in support of: +// Nic Ezzell, Lev Barash, Itay Hen, Exact and universal quantum Monte Carlo estimators for energy susceptibility and fidelity susceptibility. +// +// +""" +# %% +import re +import numpy as np + +def make_no_stand_param_fstr(seed, beta, tau, Tsteps=100000, steps=1000000, +stepsPerMeasurement=10): + """ + Writes parameters.hpp without any standard observables. + """ + param_str = make_param_file_header(seed, beta, tau, Tsteps, steps, stepsPerMeasurement) + param_str += make_param_file_footer() + return param_str + +def make_hdiag_fidsus_param_fstr(seed, beta, Tsteps=100000, steps=1000000, +stepsPerMeasurement=10): + """ + Makes parameters.hpp with Hdiag measurements relevant to compute + fidelity suscepibility. + """ + param_str = make_param_file_header(seed, beta, 0.0, Tsteps, steps, stepsPerMeasurement) + param_str += """ +// +// Below is the list of standard observables: +// + +#define MEASURE_HDIAG // is measured when this line is not commented +#define MEASURE_HDIAG_INT1 +#define MEASURE_HDIAG_INT2 +#define MEASURE_HDIAG_INT3 +""" + param_str += make_param_file_footer() + + return param_str + +def make_hoffdiag_fidsus_param_fstr(seed, beta, Tsteps=100000, steps=1000000, +stepsPerMeasurement=10): + """ + Makes parameters.hpp with Hoffdiag measurements relevant to compute + fidelity suscepibility. + """ + param_str = make_param_file_header(seed, beta, 0.0, Tsteps, steps, stepsPerMeasurement) + param_str += """ +// +// Below is the list of standard observables: +// + +#define MEASURE_HOFFDIAG // is measured when this line is not commented +#define MEASURE_HOFFDIAG_INT1 +#define MEASURE_HOFFDIAG_INT2 +#define MEASURE_HOFFDIAG_INT3 +""" + param_str += make_param_file_footer() + + return param_str + +def make_all_stand_param_fstr(seed, beta, tau, Tsteps=100000, steps=1000000, +stepsPerMeasurement=10): + """ + Makes parameters.hpp with all standard observables. 
+ """ + param_str = make_param_file_header(seed, beta, tau, Tsteps, steps, stepsPerMeasurement) + param_str += """ +// +// Below is the list of standard observables: +// + +#define MEASURE_H // is measured when this line is not commented +#define MEASURE_H2 // is measured when this line is not commented +#define MEASURE_HDIAG // is measured when this line is not commented +#define MEASURE_HDIAG2 // is measured when this line is not commented +#define MEASURE_HOFFDIAG // is measured when this line is not commented +#define MEASURE_HOFFDIAG2 // is measured when this line is not commented +#define MEASURE_Z_MAGNETIZATION // Z-magnetization is measured when this line is not commented +#define MEASURE_HDIAG_CORR +#define MEASURE_HDIAG_INT1 +#define MEASURE_HDIAG_INT2 +#define MEASURE_HDIAG_INT3 +#define MEASURE_HOFFDIAG_CORR +#define MEASURE_HOFFDIAG_INT1 +#define MEASURE_HOFFDIAG_INT2 +#define MEASURE_HOFFDIAG_INT3 +""" + param_str += make_param_file_footer() + + return param_str + +def parse_otxt_temp(temp_fname): + """ + Parse temporary output file. + """ + with open(temp_fname, 'r') as f: + lines = f.readlines() + # define regular expression parser for numbers + #p = '[\d]+[.,\d]+|[\d]*[.][\d]+|[\d]+' + p = r'[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?|nan' + # get emergent quantities + sign = re.findall(p, lines[2])[0] + sign_std = re.findall(p, lines[3])[0] + mean_q = re.findall(p, lines[4])[0] + max_q = re.findall(p, lines[5])[0] + emergent = [sign, sign_std, mean_q, max_q] + otxt_array = [] + otxt_std = [] + # get O.txt observables + for j in range(1, 7): + idx = 6 + 3*j - 2 + otxt_array.append(re.findall(p, lines[idx])[0]) + idx += 1 + otxt_std.append(re.findall(p, lines[idx])[0]) + otxt_array = np.array([float(x) for x in otxt_array]) + otxt_std = np.array([float(x) for x in otxt_std]) + # get time simulation took + idx = 6 + 3*6 + time = re.findall(p, lines[idx])[0] + + return emergent, otxt_array, otxt_std, time + +def parse_fidsus_temp(temp_fname): + """ + Parse temporary output file. + """ + with open(temp_fname, 'r') as f: + lines = f.readlines() + # define regular expression parser for numbers + #p = '[\d]+[.,\d]+|[\d]*[.][\d]+|[\d]+' + p = r'[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?|nan' + # get emergent quantities + sign = re.findall(p, lines[2])[0] + sign_std = re.findall(p, lines[3])[0] + mean_q = re.findall(p, lines[4])[0] + max_q = re.findall(p, lines[5])[0] + emergent = [sign, sign_std, mean_q, max_q] + obs_array = [] + obs_std = [] + # get (or Hdiag, Hoffdiag), and 3 integrals thereof + for j in range(1, 5): + idx = 6 + 3*j - 2 + obs_array.append(re.findall(p, lines[idx])[0]) + idx += 1 + obs_std.append(re.findall(p, lines[idx])[0]) + # get time simulation took + idx = 6 + 3*4 + time = re.findall(p, lines[idx])[0] + + return emergent, obs_array, obs_std, time + +def parse_correlator_temp(temp_fname): + """ + Parse temporary output file. 
+ """ + with open(temp_fname, 'r') as f: + lines = f.readlines() + # define regular expression parser for numbers + #p = '[\d]+[.,\d]+|[\d]*[.][\d]+|[\d]+' + p = r'[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?|nan' + # get emergent quantities + sign = re.findall(p, lines[2])[0] + sign_std = re.findall(p, lines[3])[0] + mean_q = re.findall(p, lines[4])[0] + max_q = re.findall(p, lines[5])[0] + emergent = [sign, sign_std, mean_q, max_q] + obs_array = [] + obs_std = [] + # get and + for j in range(1, 3): + idx = 6 + 3*j - 2 + obs_array.append(re.findall(p, lines[idx])[0]) + idx += 1 + obs_std.append(re.findall(p, lines[idx])[0]) + # get time simulation took + idx = 6 + 3*2 + time = re.findall(p, lines[idx])[0] + + return emergent, obs_array, obs_std, time + +# +# helper functions only for this file +# +def make_param_file_header(seed, beta, tau, Tsteps=10000000, steps=1000000, +stepsPerMeasurement=10): + """ + Writes parameters.hpp header. + """ + param_str = """ +// +// This program helps implement estimators for the titular quantities in the preprint: +// Nic Ezzell, Lev Barash, Itay Hen, Exact and universal quantum Monte Carlo estimators for energy susceptibility and fidelity susceptibility. +// +// This work augments the base code whose information is below. +// +// +// This program implements Permutation Matrix Representation Quantum Monte Carlo for arbitrary spin-1/2 Hamiltonians. +// +// This program is introduced in the paper: +// Lev Barash, Arman Babakhani, Itay Hen, A quantum Monte Carlo algorithm for arbitrary spin-1/2 Hamiltonians, Physical Review Research 6, 013281 (2024). +// +// This program is licensed under a Creative Commons Attribution 4.0 International License: +// http://creativecommons.org/licenses/by/4.0/ +// +// ExExFloat datatype and calculation of divided differences are described in the paper: +// L. Gupta, L. Barash, I. Hen, Calculating the divided differences of the exponential function by addition and removal of inputs, Computer Physics Communications 254, 107385 (2020) +// + +// +// Below are the parameter values: +// + """ + # add actual adjustable parameters + param_str += f""" +#define Tsteps {Tsteps} // number of Monte-Carlo initial equilibration updates +#define steps {steps} // number of Monte-Carlo updates +#define stepsPerMeasurement {stepsPerMeasurement} // number of Monte-Carlo updates per measurement +#define beta {beta} // inverse temperature +#define tau {tau} //imaginary propogation time +#define rng_seed {seed} +""" + return param_str + +def make_param_file_footer(): + # add final technical parameters, not usually needed to adjust + param_str = """ +// +// Below are the implementation parameters: +// + +#define qmax 1000 // upper bound for the maximal length of the sequence of permutation operators +#define Nbins 250 // number of bins for the error estimation via binning analysis +#define EXHAUSTIVE_CYCLE_SEARCH // comment this line for a more restrictive cycle search + """ + return param_str +#%% diff --git a/utils/pauli_manipulations.py b/utils/pauli_manipulations.py new file mode 100644 index 0000000..359c92a --- /dev/null +++ b/utils/pauli_manipulations.py @@ -0,0 +1,377 @@ +""" +// +// This program contains code generate and apply the "special random unitaries" described in Appendix A of: +// Nic Ezzell, Lev Barash, Itay Hen, Exact and universal quantum Monte Carlo estimators for energy susceptibility and fidelity susceptibility. 
+// +// +""" +# %% +import numpy as np + +class PauliTerm: + def __init__(self, c, supp, ops, n): + if len(supp) != len(ops): + raise ValueError("The length of 'supp' must match the length of 'ops'") + self.c = c + # sort the support from smallest to larget always + supp = np.array(supp) + ops = np.array(ops) + sort_idx = np.argsort(supp) + self.supp = tuple(supp[sort_idx]) + self.ops = tuple(ops[sort_idx]) + self.n = n + + def __str__(self): + full_ops = ['I'] * self.n + for idx, op in zip(self.supp, self.ops): + full_ops[idx - 1] = op + ops_str = ''.join(full_ops) + return f"{self.c}*{ops_str}" if self.c != 1 else f"{ops_str}" + + def __eq__(self, other): + n_check = (self.n == other.n) + c_check = np.isclose(self.c, other.c) + sup_check = (self.supp == other.supp) + op_check = (self.ops == other.ops) + return n_check and c_check and sup_check and op_check + + def to_pmr_line(self): + parts = [f"{self.c:.6f}"] + for idx, op in zip(self.supp, self.ops): + parts.append(str(idx)) + parts.append(op) + return ' '.join(parts) + + def __mul__(self, m): + """Handle scalar multiplication from the left: other * self.""" + if isinstance(m, (int, float, complex)): # Ensure other is a number + return PauliTerm(m * self.c, self.supp, self.ops, self.n) + return NotImplemented + + def __rmul__(self, m): + """Handle scalar multiplication from the right: self * other.""" + return self.__mul__(m) # Multiplication is commutative for scalars + + def get_dagger(self): + """ + Returns p^{\dagger}. + """ + return PauliTerm(np.conj(self.c), self.supp, self.ops, self.n) + + def bi_conj(self, p2, p3): + """Conjugate self (p1) by p2 from left and p3^dagger from right to form p4 = p2 * p1 * p3^dagger""" + if self.n != p2.n or self.n != p3.n: + raise ValueError("All Pauli terms must have the same number of qubits.") + + # Initialize new coefficient + new_c = self.c * p2.c * np.conj(p3.c) # Include conjugate for p3 + + # Initialize the result ops + new_ops = ['I'] * self.n + + # Apply the bi-conjugation across all qubits + for i in range(self.n): + # exact operators + if i + 1 in self.supp: + op1 = self.ops[self.supp.index(i + 1)] + else: + op1 = 'I' + if i + 1 in p2.supp: + op2 = p2.ops[p2.supp.index(i + 1)] + else: + op2 = 'I' + if i + 1 in p3.supp: + op3 = p3.ops[p3.supp.index(i + 1)] + else: + op3 = 'I' + + # Apply p2 * p1 + interim_op, interim_phase = self.pauli_single_multiply(op2, op1) + # Apply result * p3^dagger + new_op, phase = self.pauli_single_multiply(interim_op, op3) + new_ops[i] = new_op + new_c *= interim_phase * phase # Adjust coefficient with combined phases + + new_supp = [i + 1 for i, op in enumerate(new_ops) if op != 'I'] + non_id_ops = [p for p in new_ops if p != 'I'] + + return PauliTerm(new_c, new_supp, non_id_ops, self.n) + + + @staticmethod + def pauli_single_multiply(op1, op2): + """Multiply two single-qubit Pauli operators and return the result and phase.""" + if op1 == 'I': + return op2, 1 + if op2 == 'I': + return op1, 1 + if op1 == op2: + return 'I', 1 # Multiplying same operators results in the identity + + # Mapping rules for multiplying Pauli matrices, considering phases + rules = { + ('X', 'Y'): ('Z', 1j), + ('Y', 'Z'): ('X', 1j), + ('Z', 'X'): ('Y', 1j), + ('Y', 'X'): ('Z', -1j), + ('Z', 'Y'): ('X', -1j), + ('X', 'Z'): ('Y', -1j), + } + result, phase = rules.get((op1, op2), rules.get((op2, op1), ('I', -1))) # Check both tuple orders + return result, phase + +class PauliH: + def __init__(self, n=None, terms=None): + self.terms = [] + self.n = n + if terms is not None: + for term in 
terms: + self.add_term(term, simplify=False) + self.simplify() + + def __contains__(self, pauli): + for op in self.terms: + if op == pauli: + return True + return False + + def __eq__(self, other): + # simplify first to ensure things are correct + self.simplify() + other.simplify() + if len(other.terms) != len(self.terms): + return False + for o in other.terms: + if o not in self: + return False + return True + + def add_term(self, term, simplify=True): + if self.n is not None and term.n != self.n: + raise ValueError("The number of qubits in the term must match the Hamiltonian.") + if self.n is None: + self.n = term.n + self.terms.append(term) + if simplify: + self.simplify() + + def simplify(self): + """Combine like terms and ensure the coefficients are real at the end.""" + combined_terms = {} + for term in self.terms: + key = (term.supp, term.ops) + if key in combined_terms: + combined_terms[key].c += term.c + combined_terms[key].c = self.truncate_imaginary(combined_terms[key].c) + else: + combined_terms[key] = PauliTerm(term.c, term.supp, term.ops, self.n) + + new_terms = [] + for term in combined_terms.values(): + if not np.isclose(term.c.imag, 0): + raise ValueError("The final coefficients must be real.") + elif not np.isclose(term.c.real, 0): + new_terms.append(PauliTerm(term.c.real, term.supp, term.ops, self.n)) + + self.terms = new_terms + + def __str__(self): + """Return a string representation of the Hamiltonian.""" + if not self.terms: + return "0" + return " + ".join(str(term) for term in self.terms) + + def to_pmr_str(self): + """Return the PMR formatted string of the Hamiltonian, each term on a new line.""" + return "\n".join(term.to_pmr_line() for term in self.terms) + + def set_as_1q_model(self, diag_pert, lam): + self.terms = [] + if diag_pert is True: + self.add_term(PauliTerm(1.0, [1], ['X'], self.n)) + self.add_term(PauliTerm(lam, [1], ['Z'], self.n)) + else: + self.add_term(PauliTerm(lam, [1], ['X'], self.n)) + self.add_term(PauliTerm(1.0, [1], ['Z'], self.n)) + + return + + def conjugate(self, u): + if not isinstance(u, PauliU): + raise TypeError("The object must be an instance of PauliU.") + new_hamiltonian = PauliH(n=self.n) + for term_h in self.terms: + for term_u1 in u.terms: + for term_u2 in u.terms: + # Conjugate h by u1 and u2^dagger + new_term = term_h.bi_conj(term_u1, term_u2) + new_hamiltonian.add_term(new_term, simplify=False) + new_hamiltonian.simplify() + return new_hamiltonian + + def __add__(self, other): + if not isinstance(other, PauliH): + return NotImplemented + if self.n != other.n: + raise ValueError("Cannot add Hamiltonians with different numbers of qubits.") + new_terms = self.terms + other.terms + return PauliH(self.n, new_terms) + + def __mul__(self, other): + if isinstance(other, (int, float)): + new_terms = [other * term for term in self.terms] + return PauliH(self.n, new_terms) + return NotImplemented + + def __rmul__(self, other): + return self.__mul__(other) + + @staticmethod + def truncate_imaginary(c, atol=1e-9): + if np.isclose(c.imag, 0, atol=atol): + return c.real + return c + +class PauliU: + def __init__(self, n=None, terms=None): + self.terms = [] + self.n = n + if terms is not None: + for term in terms: + self.add_term(term) + self.validate_normalization() + if terms is None and n is not None: + self.terms = [PauliTerm(1.0, [], [], n)] + + def __contains__(self, pauli): + for op in self.terms: + if op == pauli: + return True + return False + + def add_term(self, term): + if self.n is not None and term.n != self.n: + raise 
ValueError("The number of qubits in the term must match the Hamiltonian.") + if self.n is None: + self.n = term.n + # remove identity term if present + id = PauliTerm(1.0, [], [], self.n) + if id in self: + self.terms = [] + self.terms.append(term) + self.validate_anticommutation() + + def __str__(self): + if not self.terms: + return "0" + return " + ".join(str(term) for term in self.terms) + + def to_pmr_str(self): + return "\n".join(term.to_pmr_line() for term in self.terms) + + def validate_normalization(self): + """Ensure the sum of squares of the coefficients is 1.""" + c = np.array([p.c for p in self.terms]) + if not np.isclose(np.linalg.norm(c), 1): + raise ValueError("The coefficients are not normalized properly.") + return True + + def validate_anticommutation(self): + """Ensure all Pauli terms anti-commute.""" + for i in range(len(self.terms)): + for j in range(i + 1, len(self.terms)): + if not self.check_anticommutation(self.terms[i], self.terms[j]): + raise ValueError("Not all Pauli terms anti-commute.") + return True + + def set_as_random(self, l, eps=0.05, mu=0, sig=1, seed=None): + """ + Forms random n qubit unitary consisting of + sum of l Pauli, anticommuting Paulis. + See [J. Chem. Theory Comput. 2020, 16, 1, 190–195]. + """ + if seed is not None: + np.random.seed(seed) + if l > 2 * self.n + 1: + raise ValueError("l cannot be bigger than 2n+1") + elif l == 0: + print("No setting done since l = 0") + return + self.terms = [] + # choose l random anti-commuting paulis + canon_paulis = self.form_canon_anticomm_paulis(self.n) + if l == 1: + l_paulis = [canon_paulis[0]] + elif l == 2: + l_paulis = [canon_paulis[0], canon_paulis[-1]] + else: + l_pauli_idx = np.random.choice(range(1, len(canon_paulis)-1), l-2, replace=False) + l_paulis = [canon_paulis[0], canon_paulis[-1]] + [canon_paulis[i] for i in l_pauli_idx] + # choose l real, normalized coefficients + if l == 1: + c = [1.0] + else: + if eps is not None: + c1 = [np.sqrt(1 - eps)] + c_rest = np.random.normal(mu, sig, l-1) + fac = np.sqrt(eps) / np.linalg.norm(c_rest) + c = c1 + list(fac * c_rest) + else: + c = np.random.normal(mu, sig, l) + c = np.array(c) + c /= np.linalg.norm(c) + # build and add as PauliTerms + for k in range(l): + p = self.form_pauli_term(c[k], l_paulis[k]) + self.add_term(p) + self.validate_normalization() + self.validate_anticommutation() + + def get_inverse(self): + """ + Returns u^{\dagger}, i.e., inverse of [self]. + """ + new_u = PauliU(self.n, [p.get_dagger() for p in self.terms]) + return new_u + + @staticmethod + def check_anticommutation(term1, term2): + differing_positions = 0 + for i in range(term1.n): + op1 = term1.ops[term1.supp.index(i+1)] if i+1 in term1.supp else 'I' + op2 = term2.ops[term2.supp.index(i+1)] if i+1 in term2.supp else 'I' + if op1 != 'I' and op2 != 'I' and op1 != op2: + differing_positions += 1 + return differing_positions % 2 == 1 + + @staticmethod + def form_canon_anticomm_paulis(n): + """ + See 4.2 in [https://arxiv.org/pdf/1909.08123.pdf], + construction (1). + """ + g = ["X", "Y", "Z"] + for _ in range(n-1): + x_part = ["X" + p for p in g] + y_part = ["Y" + "I" * len(g[0])] + z_part = ["Z" + "I" * len(g[0])] + g = x_part + y_part + z_part + + return g + + @staticmethod + def form_pauli_term(c, pstr): + """ + Forms PauliTerm objct given + coefficient [c] and Pauli string + [pstr]. 
+ """ + supp = [] + ops = [] + for k in range(len(pstr)): + if pstr[k] != "I": + supp.append(k + 1) + ops.append(pstr[k]) + + return PauliTerm(c, supp, ops, len(pstr)) +# %% \ No newline at end of file diff --git a/utils/rng_seeds.txt b/utils/rng_seeds.txt new file mode 100644 index 0000000..41406b9 --- /dev/null +++ b/utils/rng_seeds.txt @@ -0,0 +1 @@ +1370102282,3629902865,116633646,2161158202,1224887876,2172515818,2107545667,2270171661,1679887754,2910029232,2684078236,966760014,1539112871,1879308759,827874989,1313211672,185933287,2643867551,1400802099,309524274,463614167,984633678,1740697352,3748383779,3085492750,2902098116,1493382549,3026438146,3864133010,2631191514,1544744623,3277886342,4109413259,2796826566,593867960,1999566690,1365603128,3116011270,3923723647,1405552382,2904898082,2206110902,3890146427,1033691786,1305568115,2666264218,1193423336,316042340,2972217193,120783945,4198334275,167272076,2084310749,981201754,1941422520,1969874232,1089504624,3925654872,1635187307,3066238109,4023596244,3528225635,3298922368,955659914,3498504272,2005153104,365680250,3415748027,2556728122,1943401384,1376792554,3742337845,1059681274,589891671,1015707617,2834120170,3978301935,2457553573,1054649413,3736065955,1924539507,1066740066,4190091642,4106875694,4206712692,3966559838,2781301355,310751418,1957216706,3153889764,3114679332,532428812,1067073608,3333709682,2285602057,3105502193,3623364247,440895945,414461690,1939027982,3083918378,4177218346,726759520,2394654568,3902081244,2732975739,1173987662,3874332200,1874165137,1560488242,1042466736,3270922672,287937285,3971722981,2840652955,1775715253,207497198,3633748050,2322394112,3713261617,2516168770,2980838818,3611256820,2690663284,1509603260,2823019668,441428742,1443626223,4197895505,3823201178,4139230147,889968061,1940035201,2768049352,4291582104,890358580,2578788206,3863777506,2757686751,3477413459,3148207824,2782398609,2236009934,2369603792,2576231269,1304304866,3545020371,3315985958,465888435,2299309492,3885499547,3068131737,326960992,2463242580,2917961486,425735379,2620814916,1152889064,1505910274,207876882,3997475269,1150380211,3515059557,4180822714,1381684739,490462653,1605579023,983569841,3955237754,2199663394,1664167691,3929878293,1859862739,3145589783,707052815,1283488812,1274856162,3151118827,4012268429,2588555732,2503448790,3003107129,2472604903,156501363,1866354567,751305685,2479482358,3408240184,3604855071,1499596909,872180645,3235194172,1401678796,2714553008,2152699823,3521459589,3820014055,577760701,2525635075,367331377,873689366,1147802920,3459890617,3459513631,1009015957,2405594699,2345611120,1118397360,2882148632,203934781,2309822018,1487419905,1113393242,2107494760,3142605911,2828082910,3524985302,22001458,610114636,3204384605,2395512508,518842588,1494904794,1376361880,3913009820,2586187216,2121883160,1662111578,261266425,3896729877,3839814610,3668062776,582328807,2417227081,3846609480,2631817754,724404805,539609668,1811131654,652825413,124292029,135899855,4176457276,2421969481,1548732438,2259140668,713285251,3089953340,872038178,11728916,1985099697,940580258,1472150138,1387057582,2030371599,483105713,882112418,2191420984,3473821491,1330581202,1305295263,3429021035,3982217656,2295646581,478694497,2242141170,557645059,864783281,298608701,1559947421,859520684,579672882,2564667943,643922992,1546154560,565517867,1120776931,3680794331,1552893433,1026545651,3903009484,530874743,1622097988,2625421465,1191674996,1715403753,623864622,1472624766,1408428232,855642085,3206949433,4155130202,1406451306,2115664741,815381914,4261906143,82853750,194
8780357,3047982799,3875192055,1812718256,3365894589,4053334417,1708512282,2230429471,4178246455,2192532718,2789594475,1054588773,3238010692,1418141283,3539994143,2534343970,3666268058,1229123119,4178199083,1550574455,2287028942,128737238,3120571410,2477007545,3756436726,889865250,2705837328,1035295247,1872589440,2372767980,415941622,4131359901,3696799124,3204680005,1083274017,2566831451,805923666,2430316485,2526969006,1829674545,3831917090,3717055254,2887240763,3774654940,3190292210,1864029138,314821957,2215596925,313783522,787902540,2156762538,2848529963,2880766419,1214364301,3399370605,556729694,4162627556,1621052021,843245335,4263182395,1445705552,1986837246,2736132102,2494624090,1892613471,2377915415,4273090053,2503333801,3175028645,2978540429,796778795,1893491397,3361046102,1929692729,1994468803,1030804692,3078972355,449831678,1044825917,1992126950,2417803977,1306626019,345425735,146150164,1448495701,1245782423,4024708362,2925650006,4194544187,395129907,1734777606,722413482,3375629660,1825261616,3890141802,3186785870,3170707990,3243434667,3185413896,3482492067,1490102168,4020670034,2143952856,2523060572,1288718106,1330636575,526371037,658969063,2918274712,2271209487,311791278,306138304,4229091974,1322771397,59537823,3950114362,3913886860,2496599273,1956630264,2863379568,3528082580,2086269600,642581694,3386988763,981180088,3122559017,613095672,3851269123,1803204752,2220914438,3392765575,294920235,2976152216,2979049133,1320311535,1437852426,1716104150,375777835,1048209064,359882412,2733303176,3506860210,1083476856,3856466111,4219837490,4212394807,1066245256,3989947520,411546739,18008303,2834907767,1664897816,3566812681,57085556,494540286,1350844985,2805495756,700587445,2115911458,2508969809,2621747775,2259337583,4044552445,3463273943,3574584043,526541954,3136036419,2745240603,3483580802,3998528805,4112225814,2365241313,492598594,1018926876,956859868,2947310523,657972640,1806814502,4201391052,1910890789,2786296105,73612029,902875638,3009242234,1337370921,1044676017,2097276385,1003116874,149937831,894791970,3099568702,2944340614,3904505105,3573029468,3153146336,2525565604,610677257,683967725,34778682,1685540924,27772865,1346991676,3256350339,1232729659,4263758434,4228598664,3542840030,1306419499,2302041956,101771213,3424975287,2930120162,2263041864,1147675767,404495443,1359644403,2617729759,1409484384,243585935,2110223295,176879658,2962110967,3931845450,3500394460,120416574,2677221384,1095801904,2204826073,3261603755,1450763268,3507291855,611215628,1284899485,4039301197,1198863146,1626708753,3323697896,418717663,68947740,4244498561,1568403798,2896149158,472375869,863497662,3410057001,1479322397,2594758345,2940921976,2320955245,3432980280,1745113295,10188746,3573374579,1102103577,2326821743,359854492,44501870,3193024982,987136441,2880225360,4195518334,3988604377,2675099172,3905390723,1678114989,2261801678,3095418215,2544831181,2798722308,1939376052,1453043003,2303390569,840565393,3663677720,1796961737,2263460444,2520927640,2139876889,748383031,2792394148,4272031627,3459730256,2575271861,916783207,4248487056,2672640974,771129629,4034768866,450688697,1188774838,2375216608,2223195687,3420482824,230280072,118022343,1347386162,1617374068,2151298035,3432210017,1504814918,2849293135,455540134,98260495,2089011455,2611553957,638608836,2764566449,2975963896,1431795503,1562047420,3719780179,2289183642,2316803412,2592790014,3412472893,3377084588,297088842,3574399729,31545348,1545048599,1068449587,2648321031,1440392286,2899469568,2804492283,2471795932,3575787087,3598476650,226919256,612942947,2166865736
,2462451599,3175929393,4274983246,770950878,3493395187,2869617332,1033399907,1828636316,3733704351,3001726091,3911363561,3067570660,4194897324,2897835044,936753690,1626888100,1869467829,194952153,2523740154,1188173179,2472209258,1594195328,198966466,2240010616,3364870777,4030346378,4146233662,3581008549,3037431528,3240635659,3244556469,2095153223,2303322106,463413683,1848877915,2636913508,3672941253,498085744,2223805423,497366142,3373134713,2457329754,2045866661,2147319274,1316924606,3735180890,1380340518,2951840984,1796504667,23123496,3478935695,1877427202,3685515161,2668507684,2374819452,3804506899,280168495,1040885122,1206081889,2788177130,203617864,2562978985,1369085156,4004748585,4219920693,748065664,615575168,3860480927,3992737703,542134890,982343667,1147758017,2865930507,2990809917,1950250620,3805469599,2217815536,3917668090,1437390477,4167973632,757346486,1987940936,4131866695,590142740,3927486406,2441840720,311840525,1214171776,3575567383,2435848726,3063880649,716002013,2927705036,555805401,1247182333,4110572247,4187146909,1680687956,2532024999,1855257572,276471218,3881981648,3720923999,3664158922,1476829029,1663940057,3076831451,1394488360,3342417807,1441255560,1401071938,281396322,2185780497,1954741658,3279047158,3766954772,3068463527,2858800979,2345449289,2053021696,2195409440,3057503802,1737138009,2141597536,3222531933,2544550269,1183306636,2406850691,3808009286,1819862633,3735629691,1915699107,2594877848,2310435098,1835786295,213077301,3489609938,4149549314,2638038442,2389720649,1183821774,2358277369,1951446407,2639897194,2304169323,3029016153,4201762328,3739438928,1578290227,2310351730,2159422586,1577564520,717755834,2318026151,1455124238,2760845151,1282907043,1464198117,2922447477,819673961,2819259345,2046640480,3148204214,3454761726,141708938,2131087559,1279864539,2877099498,3131006687,2568414800,3814523488,1948188870,574110656,3399005546,1153237651,2891212925,728690704,1126396379,3917044533,542141828,3831216749,864623470,283990324,957038467,1103151280,2478543296,3211113288,4178722420,2954204186,1397943654,271148483,2276417274,4245486075,4031288671,2944967346,959274530,1976413667,1584370374,2255270447,3803721471,2120604755,4192286502,2948077916,619863095,1349539845,2491350909,2784426111,2077794082,1180091158,1564428046,4171012319,1940700011,3885665151,2700695060,3712719929,3106201746,559645246,2145681904,1033563590,2022888748,3346086483,1391579036,2285345907,2403905158,1165798195,1804984918,4241887643,2601699805,4021164733,2645000592,464387156,2351808058,1866509837,26319246,3630017710,3157190049,1216523642,3291008287,1405387773,3190046591,1139286148,3308590910,915233907,3210138516,1933566852,560253083,2993988628,4161358565,3704308254,2884842235,3200396466,2243992580,3720729622,4210126407,2545714556,3054115704,585089435,40534140,3738244422,690944222,2197288242,3792479049,1284441860,1548816035,3829865661,921909970,1038409843,441723538,2915855832,3172192724,2552427004,1296573591,300986680,1427390437,4293975749,2262242569,4167781001,3069603967,1774531231,3872878912,2146783951,3499820559,285121785,3123831098,3483854656,2912654356,390071105,527606555,739115758,2421737471,1432471840,2570235262,3140165797,2656295606,360095063,4272321341,2238669482,1923432588,943450680,470424263,2268415650,1286733247,982064248,2161644866,3263627576,3630910169,2284311392,4009648446,463180946,3876068731,656733382,551843255,2876108015,2732221737,3046485391,1459523161,830607564,2623961133,283663460,3229697737,2640215514,3605471386,2875596232,2207838803,708807186,1604030860,3670906890,441644567,116714205
9,1373478917,2092664462,1134046329,671684398,1505707815,2882900646,933818089,1224726129,3644265504,122768576,3958837572,559466027,1172602912,2558134427,2747247890,307849806,950575431,1437023,4110337814,2657684699,1849266949,768665725,3435387551,321815520,603584029,3841084984,749781672,3171082152,1994635056,2548565798,723845896,2340813880,3093895340,1623492590,2714409800,186272570,3336784178,82964513,2197022323,1009961741,3303372055,2661258744,3266514507 \ No newline at end of file
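For completeness, a hypothetical helper showing how the comma-separated seed list above can be consumed from Python when dispatching independent QMC runs (the drivers' own loading code is not reproduced here).

# Hypothetical helper: read the comma-separated RNG seeds shipped in utils/rng_seeds.txt.
def load_rng_seeds(path="utils/rng_seeds.txt"):
    with open(path) as f:
        return [int(s) for s in f.read().strip().split(",")]

seeds = load_rng_seeds()
print(len(seeds), seeds[0])   # the first seed is 1370102282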