From 1469e4205dd18064fcff18e2f16a751e0d6695bc Mon Sep 17 00:00:00 2001 From: "Documenter.jl" Date: Sun, 10 Nov 2024 09:25:18 +0000 Subject: [PATCH] build based on 8805e9b --- dev/.documenter-siteinfo.json | 2 +- dev/assets/titlepage_sparseirjl.pdf | Bin 42296 -> 65514 bytes dev/guide/index.html | 36 ++++++++++------------------ dev/index.html | 2 +- dev/objects.inv | Bin 2672 -> 2667 bytes dev/private/index.html | 28 +++++++++++----------- dev/public/index.html | 10 ++++---- dev/refs.bib | 19 ++++++++------- dev/search_index.js | 2 +- 9 files changed, 45 insertions(+), 54 deletions(-) diff --git a/dev/.documenter-siteinfo.json b/dev/.documenter-siteinfo.json index 408d610..820bf14 100644 --- a/dev/.documenter-siteinfo.json +++ b/dev/.documenter-siteinfo.json @@ -1 +1 @@ -{"documenter":{"julia_version":"1.11.1","generation_timestamp":"2024-10-22T05:02:00","documenter_version":"1.7.0"}} \ No newline at end of file +{"documenter":{"julia_version":"1.11.1","generation_timestamp":"2024-11-10T09:25:12","documenter_version":"1.7.0"}} \ No newline at end of file diff --git a/dev/assets/titlepage_sparseirjl.pdf b/dev/assets/titlepage_sparseirjl.pdf index c74e7aea1dd256f42082fb2bfb977b0b56235ae4..cbe7089f0d0f3cb188b9eb2b1c3035b7a54afa25 100644 GIT binary patch delta 22928 zcmeIab$k=q_cxA9VX?(!DJ))S-0+!X;x_RFjZQpiJV6F`7K%F*TdervEnb}BUfkWS z$aB-eF6{32^Zh=r=a1j-d0wQD%v?G5+;h&A_c?cF)4@{rKbKz3T!fQSiK#x5Go@q4 zR08Dn`a;1}N-|B6N>Ii9)>MMrh=Fg3)#5P1zF2BEkVj0Vv1sXJ20fKYC8aZ2sXbB& z3Tx084w$XM6l!s8xiMh%hEmDJcM4y~7_x$E0^8>efeg@0QBG>-&M8)}CCLIMN%<=; zB5OSA3s`~^&S25#DKs)pO()Zmg}JHJR00P|ZABpPt?r05=e*Yl8!QL3KbcMG9wIB{ncEGx(Ikx7WGRDBwfrW4rH19SFAKh z^p_}PCa2XLN@=C{!a<`>N|bOoY6ul@U`Wk_qkg@RZ*ZHvQoo5zGYEOyxSnkHTRfV$ zhOHo)D6n40V>yf-jnl%HC?SZ)^KqFdhe%T?$r_qLtANcOsn4jx0g(_QU~3d{6PZQC zAYKGv>%|-dhEbP6qEge$It|UH=W!i+vchNL!*nYp#`yEGFV0Ou330k&^&1zSL);Xo3c=CSlOPLA_S$Hu6|O0}u3+r=eOvuMnNW zZRIP3W}?Awp2lw=+ljguq_XhcQKOdTMRf|I+2eLtKv^?Ms8+Z^J-)(?e_K&Q zcDpP*yA6RY5m49*D%wqmhHBIr!ENjWB-J8A~K)dFE!bW)$1qM&^t zikd(}fTrBwaR*I^G-3kGf_%U*&6{k?1Q-UL^6SZXF-Hne!JtWsQVPVx3K-P?s04_y z;-+M}SCN#2+Ny)y8m>m8R4WWd9yg&T+wBG(ivk+30t53~_)-KIu3hhmxltV)HE4m6 zYH3b`nnebk2Fxhj8r-9>o5^;!0qBvdm6FU9Ma1;4dUnSRK%{^`QglE?FcB~S#BYHu z{AACBBnwZI(D9@|f;6|;EdFe zUmq00FsGd03p?myU)Tu=5Qq)IR0zQ!BLwn~1g;d=@U( zpw?hcl&`0VEXFiOSl~o?B8>~kQshNJy2Z>-N}UhVxfDB%#EcVhSfx}Df&P=@5zJ01 zod)Pw7&NFAe$a#D>}EH6Jx+txBGw@^m4fGX77alUH*U2E{Rr6~(13bkU`Jqh$q^J0 zT1m0Eg{P0{c-*j=>^1|NNf@=#s3C#Gr7EFWPi8qyWNxxd9G7trD%%|c3DStZcp^#Y z$=qOas7Sb9)4{}}Q+U954oUbSGZ+bBU~~=>H|7ClJZ6f;4^#otr9M3v&ITPC6(DxE z2~5#|S*KnVVzIrUWWzx~gQ)jJ89KhuZQuxSl2jA(nmxcEoPy-F!^GFPB~CR-26|!8 zqEQ{s0)91N&`@z6r`A%8LSszbi_>_F#at%p&j%j3OEO|6VM}16E=9bK+LST1oDY=JUt*{NJtU_ z3=A)i!~xrFdPtQtOp}&o0R|@pL#$|=C+88pQ5rH4Lt2oI0IAuF435GjS&h=TnWBWa zdPs}GR;R^lp<+1ffcSdIjva&}(PWBnONgrDzhMgCYF8Otraq zxUiv^!U={A7(S2yGGK5*Wv5~)hdx8B0R+K}rvVKLQKAF|bAu6sL%{H2U}k_MN+bp* zB_FcI-l2Mk7!4}+Q1i_Jc&{J?YZb=f+6j4fmWi_FXf@^(}AvkEED56FAwxWEs z-V-Xm$BGzMo85MUHpzy)h!?ei^yS`>Bm$}RU-MnX^$k2y*pR#|>M&Rna30B3ln_i3 zWD6`_jl1Zcm?XgFS-`SuNnWyw8gzlt5i%w(3B?VXv>Lk^-GCqk1t^&yFa8UtDQpM}7M z66{Qp#v)XxC1i=pMV2~ zR$EjMe3|H=6vs&dmmJr31Yw5}(8h=YZjp+>dJ&6QbOOvwf$e&-8WStwL~_FUTbutKEj+33|0%KW zY5&d0#9XkTfc?KAxy4e#TCj(tBp0zF!}NgdSaRd)06V7d>I7 zF->v^lHFlL9E=Du&1KXjmt`tu)KdS=qQ7VIA9pZ0$=H`UbEe@9%2BS^R1%|}{ zgBgY;VhEC&y+tO#Et)!uE5H&q*p&b?gCMTiMkABldfi{c3@Vx;zL5UzMk3tyA=0SoWC6Gn{ zYf&x+Y0a<++YX6jkRN0FFi{W+=-E+JEQUgAuq6}+ zpokhwvOrZaJx72_LQn$Zpi!wDBKbH5Od5wMRE`6aX(76R6ToD2h-u~!qjIo)jB?UY zln2y|2yh4u!JHtX1p7&tCqc}pA`KQq5f7&H!D2lEAe;)8AwbxwG#Djv`H(sQt9)D~ 
[GIT binary patch data omitted]
diff --git a/dev/guide/index.html b/dev/guide/index.html
index e1a8789..ee6a594 100644
--- a/dev/guide/index.html
+++ b/dev/guide/index.html
@@ -1,5 +1,5 @@
-Guide · SparseIR.jl

Introduction

We present SparseIR.jl, a Julia library for constructing and working with the intermediate representation of correlation functions [14]. The intermediate representation (IR) takes the matrix kernel occurring in transforming propagators between the real-frequency axis and the imaginary-time axis and performs a singular value expansion (SVE) on it, decomposing it into a set of singular values as well as two sets of functions. One of those lives on the real-frequency axis and one on the imaginary-time axis. Expressing a propagator in terms of either basis–by an ordinary least squares fit–then allows us to easily transition between them. In combination with a prescription for constructing sparse sets of sampling points on each axis, we have a method for optimally compressing propagators.

SparseIR.jl implements the intermediate representation, providing on-the-fly computation of basis functions and singular values accurate to full precision along with routines for sparse sampling. It is further fully unit tested, featuring near-complete code coverage. Here, we will explain its inner workings by means of an example use case. In preparing this document, SparseIR.jl version 1.0.18 and Julia version 1.11.1 were used.

Problem statement

We take a problem to be solved from the sparse-ir paper [4].

Let us perform self-consistent second-order perturbation theory for the single impurity Anderson model at finite temperature. Its Hamiltonian is given by

\[ H = U c^\dagger_\uparrow c^\dagger_\downarrow c_\downarrow c_\uparrow + \sum_{p\sigma} \big(V_{p\sigma} f_{p\sigma}^\dagger c_\sigma + V_{p\sigma}^* c_\sigma^\dagger f_{p\sigma}\big) + \sum_{p\sigma} \epsilon_{p} f_{p\sigma}^\dagger f_{p\sigma}\]

where $U$ is the electron interaction strength, $c_\sigma$ annihilates an electron on the impurity, $f_{p\sigma}$ annihilates an electron in the bath, $\dagger$ denotes the Hermitian conjugate, $p\in\mathbb R$ is bath momentum, and $\sigma\in\{\uparrow, \downarrow\}$ is spin. The hybridization strength $V_{p\sigma}$ and bath energies $\epsilon_p$ are chosen such that the non-interacting density of states is semi-elliptic with a half-bandwidth of one, $\rho_0(\omega) = \frac2\pi\sqrt{1-\omega^2}$, $U=1.2$, $\beta=10$, and the system is assumed to be half-filled.

Outline

To provide an overview, we first give the full code used to solve the problem with SparseIR.jl.

using SparseIR
+Guide · SparseIR.jl

Introduction

We present SparseIR.jl, a Julia library for constructing and working with the intermediate representation of correlation functions [1–4]. The intermediate representation (IR) takes the matrix kernel transforming propagators between the real-frequency axis and the imaginary-time axis and performs a singular value expansion (SVE) on it. This decomposes the matrix kernel into a set of singular values as well as two sets of functions. One of those lives on the real-frequency axis and one on the imaginary-time axis. Expressing a propagator in terms of either basis–by an ordinary least squares fit–then allows us to easily transition between them. In combination with a prescription for constructing sparse sets of sampling points on each axis, we have a method for optimally compressing propagators.

SparseIR.jl implements the intermediate representation, providing on-the-fly computation of basis functions and singular values accurate to full precision along with routines for sparse sampling. It is further fully unit tested, featuring near-complete code coverage. Here, we will explain its inner structure by means of an example use case. In preparing this document, SparseIR.jl version 1.0.18 and Julia version 1.11.1 were used.

Problem statement

We take a problem to be solved from the sparse-ir paper [4].

Let us perform self-consistent second-order perturbation theory for the single impurity Anderson model at finite temperature. Its Hamiltonian is given by

\[ H = U c^\dagger_\uparrow c^\dagger_\downarrow c_\downarrow c_\uparrow + \sum_{p\sigma} \big(V_{p\sigma} f_{p\sigma}^\dagger c_\sigma + V_{p\sigma}^* c_\sigma^\dagger f_{p\sigma}\big) + \sum_{p\sigma} \epsilon_{p} f_{p\sigma}^\dagger f_{p\sigma}\]

where $U$ is the electron interaction strength, $c_\sigma$ annihilates an electron on the impurity, $f_{p\sigma}$ annihilates an electron in the bath, $\dagger$ denotes the Hermitian conjugate, $p\in\mathbb R$ is bath momentum, and $\sigma\in\{\uparrow, \downarrow\}$ is the spin. The hybridization strength $V_{p\sigma}$ and bath energies $\epsilon_p$ are chosen such that the non-interacting density of states is semi-elliptic with a half-bandwidth of one, $\rho_0(\omega) = \frac2\pi\sqrt{1-\omega^2}$, $U=1.2$, $\beta=10$, and the system is assumed to be half-filled.

Outline

To provide an overview, we first give the full code used to solve the problem with SparseIR.jl.

using SparseIR
 
 β = 10.0; ωmax = 8.0; ε = 1e-6;
 
@@ -56,34 +56,24 @@
  3.8931895383167936e-5
  1.5472919567017398e-5
  5.992753725069063e-6
- 2.2623276239584257e-6

There is quite a lot happening behind the scenes in this first innocuous-looking statement, so we will break it down:

Kernel

Consider a propagator/Green's function defined on the imaginary-time axis

\[ G(\tau) \equiv -\ev{T_\tau A(\tau) B(0)}\]

and the associated spectral function in real frequency $\rho(\omega) = -(1/\pi) \;\mathrm{Im}\;G(\omega)$. These are related via

\[ G(\tau) = -\int_{-\omega_\mathrm{max}}^{+\omega_\mathrm{max}} \dd{\omega} \tilde K(\tau, \omega) \rho(\omega)\]

with the integral kernel

\[ \tilde K(\tau, \omega) = \frac{e^{-\tau\omega}}{e^{-\beta\omega} + 1}\]

mediating between them. If we perform an SVE on this kernel, yielding the decomposition

\[ \tilde K(\tau, \omega) = \sum_{\ell=1}^\infty U_\ell(\tau) S_\ell V_\ell(\omega),\]

with the $U_\ell$s and $V_\ell$s each forming an orthonormal system, we can write

\[ G(\tau) = \sum_{\ell=1}^\infty U_\ell(\tau) G_\ell = \sum_{\ell=1}^L U_\ell(\tau) G_\ell + \epsilon_{L+1}(\tau)\]

with expansion coefficients given by

\[ G_\ell = -\int_{-\omega_\mathrm{max}}^{+\omega_\mathrm{max}} \dd{\omega} S_\ell V_\ell(\omega) \rho(\omega).\]

The singular values decay at least exponentially quickly $\log S_\ell = \order{-\ell / \log(\beta\omega_\mathrm{max})}$, so the error $\epsilon_{L+1}(\tau)$ we incur by representing the Green's function in this way and cutting off the sum after $L$ terms does too. If we know its expansion coefficients, we can easily compute the propagator's Fourier transform by

\[ \hat G(\mathrm{i}\omega) = \int_0^\beta \dd{\tau} e^{\mathrm{i}\omega\tau} G(\tau) \approx \sum_{\ell=1}^L \hat U_\ell(\mathrm{i}\omega) G_\ell,\]

where $\mathrm{i}\omega = (2n+1)\mathrm{i}\pi/\beta$ with $n \in \mathbb Z$ is a Matsubara frequency. The representation in terms of these expansion coefficients is what is called the intermediate representation and what SparseIR.jl is concerned with.

To standardize our variables, we define $x \in [-1,+1]$ and $y \in [-1,+1]$ by

\[ \tau = \beta (x+1)/2 \qand \omega = \omega_\mathrm{max} y\]

so that the kernel can be written

\[ K(x, y) = \frac{e^{-\Lambda y (x + 1) / 2}}{e^{-\Lambda y} + 1},\]

with $\Lambda = \beta\omega_\mathrm{max} = 80$. This is represented by the object LogisticKernel(80.0), which FiniteTempBasis uses internally. Logistic kernel used to construct the basis in our problem treatment 𝐾(𝑥,𝑦).

Singular value expansion

Central is the singular value expansion's [5] computation, which is handled by the function SVEResult: Its purpose is to construct the decomposition

\[ K(x, y) \approx \sum_{\ell = 0}^L U_\ell(x) S_\ell V_\ell(y)\]

where $U_\ell(x)$ and $V_\ell(y)$ are called $K$'s left and right singular functions respectively and $S_\ell$ are its singular values. By construction, the singular functions form an orthonormal basis, i.e.

\[ \int \dd{x} U_\ell(x) U_{\ell'}(x) = \delta_{\ell\ell'} = \int \dd{y} V_\ell(y) V_{\ell'}(y).\]

and thus above equation is equivalent to a pair of eigenvalue equations

\[\left. -\begin{aligned} + 2.2623276239584257e-6

There is quite a lot happening behind the scenes in this first innocuous-looking statement, so we will break it down:

Kernel

Consider a propagator/Green's function defined on the imaginary-time axis

\[ G(\tau) \equiv -\ev{T_\tau A(\tau) B(0)}\]

and the associated spectral function in real frequency $\rho(\omega) = -(1/\pi) \;\mathrm{Im}\;G(\omega)$. These are related via

\[ G(\tau) = -\int_{-\omega_\mathrm{max}}^{+\omega_\mathrm{max}} \dd{\omega} \tilde K(\tau, \omega) \rho(\omega)\]

with the integral kernel

\[ \tilde K(\tau, \omega) = \frac{e^{-\tau\omega}}{e^{-\beta\omega} + 1}\]

mediating between them. If we perform an SVE on this kernel, yielding the decomposition

\[ \tilde K(\tau, \omega) = \sum_{\ell=1}^\infty U_\ell(\tau) S_\ell V_\ell(\omega),\]

with the $U_\ell$s and $V_\ell$s each forming an orthonormal system, we can write

\[ G(\tau) = \sum_{\ell=1}^\infty U_\ell(\tau) G_\ell = \sum_{\ell=1}^L U_\ell(\tau) G_\ell + \epsilon_{L+1}(\tau)\]

with expansion coefficients given by

\[ G_\ell = -\int_{-\omega_\mathrm{max}}^{+\omega_\mathrm{max}} \dd{\omega} S_\ell V_\ell(\omega) \rho(\omega).\]

The singular values decay at least exponentially with $\log S_\ell = \order{-\ell / \log(\beta\omega_\mathrm{max})}$. Hence, the error $\epsilon_{L+1}(\tau)$ we incur by representing the Green's function in this way and cutting off the sum after $L$ terms does, too. If we know its expansion coefficients, we can easily compute the propagator's Fourier transform by
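
The decay can be inspected directly on the basis we just constructed. The following snippet is a sketch that assumes the accessor basis.s holds the vector of singular values $S_\ell$ (this mirrors the sparse-ir interface; the values are the same ones printed at the top of this guide):

basis.s ./ first(basis.s)            # normalized singular values S_ℓ / S_1
basis.s[2:end] ./ basis.s[1:end-1]   # ratios are below one and settle to a roughly constant value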

\[ \hat G(\mathrm{i}\omega) = \int_0^\beta \dd{\tau} e^{\mathrm{i}\omega\tau} G(\tau) \approx \sum_{\ell=1}^L \hat U_\ell(\mathrm{i}\omega) G_\ell,\]

where $\mathrm{i}\omega = (2n+1)\mathrm{i}\pi/\beta$ with $n \in \mathbb Z$ is a Matsubara frequency. The representation in terms of these expansion coefficients is called the intermediate representation, which SparseIR.jl is concerned with.
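
To make the expansion concrete, consider a spectral function with a single pole, $\rho(\omega) = \delta(\omega - \omega_0)$, for which $G_\ell = -S_\ell V_\ell(\omega_0)$ and $G(\tau) = -e^{-\tau\omega_0}/(1 + e^{-\beta\omega_0})$ exactly. The sketch below assumes sparse-ir style accessors basis.u, basis.v and basis.s (vectors of basis functions callable at a point, and the singular values); the test values ω₀ and τ are arbitrary:

ω₀ = 2.5                                   # pole position inside [-ωmax, ωmax]
Gl_pole = -basis.s .* basis.v(ω₀)          # IR coefficients G_ℓ = -S_ℓ V_ℓ(ω₀)

τ = 1.3
G_exact = -exp(-τ * ω₀) / (1 + exp(-β * ω₀))
G_ir    = sum(basis.u(τ) .* Gl_pole)       # Σ_ℓ U_ℓ(τ) G_ℓ
abs(G_ir - G_exact)                        # small, set by the basis accuracy ε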

To standardize our variables, we define $x \in [-1,+1]$ and $y \in [-1,+1]$ by

\[ \tau = \beta (x+1)/2 \qand \omega = \omega_\mathrm{max} y\]

so that the kernel can be written

\[ K(x, y) = \frac{e^{-\Lambda y (x + 1) / 2}}{e^{-\Lambda y} + 1},\]

with $\Lambda = \beta\omega_\mathrm{max} = 80$. This is represented by the object LogisticKernel(80.0), which FiniteTempBasis uses internally. Logistic kernel used to construct the basis in our problem treatment K(x,y).
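
As a standalone sanity check of this change of variables (using nothing but the two kernel formulas above; no library calls are involved), one can verify numerically that the rescaled kernel agrees with $\tilde K$ and is centrosymmetric:

β, ωmax = 10.0, 8.0
Λ = β * ωmax                                                # Λ = 80
Kτω(τ, ω) = exp(-τ * ω) / (exp(-β * ω) + 1)                 # kernel in (τ, ω)
Kxy(x, y) = exp(-Λ * y * (x + 1) / 2) / (exp(-Λ * y) + 1)   # rescaled kernel in (x, y)

τ, ω = 3.7, -2.1                                            # arbitrary test point
Kxy(2τ / β - 1, ω / ωmax) ≈ Kτω(τ, ω)                       # true
Kxy(0.3, 0.4) ≈ Kxy(-0.3, -0.4)                             # K(x, y) = K(-x, -y), exploited below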

Singular value expansion

Central is the singular value expansion [5], which is handled by the function SVEResult: Its purpose is to construct the decomposition

\[ K(x, y) \approx \sum_{\ell = 0}^L U_\ell(x) S_\ell V_\ell(y)\]

where $U_\ell(x)$ and $V_\ell(y)$ are called $K$'s left and right singular functions respectively and $S_\ell$ are its singular values. By construction, the singular functions form an orthonormal basis, i.e.

\[ \int \dd{x} U_\ell(x) U_{\ell'}(x) = \delta_{\ell\ell'} = \int \dd{y} V_\ell(y) V_{\ell'}(y).\]

and thus the above equation is equivalent to a pair of eigenvalue equations

\[\begin{aligned} S_\ell U_\ell(x) &= \int \dd{y} K(x, y) V_\ell(y) \\ S_\ell V_\ell(y) &= \int \dd{x} K(x, y) U_\ell(x) -\end{aligned} -\right\}\]

Here and in what follows, unless otherwise indicated, integrals are taken to be over the interval $[-1,+1]$ (because we rescaled to $x$ and $y$ variables).

  1. The function first calls the choose_accuracy helper and thereby sets the appropriate working precision. Because we did not specify a working accuracy $\varepsilon^2$, it chooses for us machine precision eps(Float64), i.e. $\varepsilon \approx 2.2 \times 10^{-16}$ and working type Float64x2 - a 128 bits floating point type provided by the MultiFloats.jl package - because in computing the SVD we incur a precision loss of about half our input bits, leaving us with full double accuracy results only if we use quad precision during the computation.

  2. Then - by calling out to the CentrosymmSVE constructor - a support grid $\{x_i\} \times \{y_j\}$ for the kernel to later be evaluated on is built. Along with these support points weights $\{w_i\}$ and $\{z_j\}$ are computed. These points and weights consist of repeated scaled Gauss integration rules, such that

    \[ \int \dd{x} f(x) \approx \sum_i f(x_i) w_i +\end{aligned}\]

    Here and in what follows, unless otherwise indicated, integrals are taken to be over the interval $[-1,+1]$ (because we rescaled to $x$ and $y$ variables).

    1. The function first calls the choose_accuracy helper and thereby sets the appropriate working precision. Because we did not specify a working accuracy $\varepsilon^2$, it chooses machine precision eps(Float64), i.e. $\varepsilon \approx 2.2 \times 10^{-16}$ and working type Float64x2 - a 128 bits floating point type provided by the MultiFloats.jl package - because in computing the SVD we incur a precision loss of about half our input bits. This leaves us with full double accuracy results only if we use quad precision during the computation.

    2. Then - by calling out to the CentrosymmSVE constructor - a support grid $\{x_i\} \times \{y_j\}$ for the kernel to be evaluated later on is built. Along with these support points, weights $\{w_i\}$ and $\{z_j\}$ are computed. These points and weights consist of repeated scaled Gauss integration rules, such that

      \[ \int \dd{x} f(x) \approx \sum_i f(x_i) w_i \quad\text{and}\quad - \int \dd{y} g(y) \approx \sum_j g(y_j) z_j.\]

      To get an idea regarding the distribution of these sampling points, refer to following figure, which shows $\{x_i\} \times \{y_j\}$ for $\Lambda = 80$: Sampling point distribution resulting from a Cartesian product of Gauss integration rules.

      Note:

      The points do not cover $[-1, 1] \times [-1, 1]$ but only $[0, 1] \times [0, 1]$. This is actually a special case as we exploit the kernel's centrosymmetry, i.e. $K(x, y) = K(-x, -y)$. It is straightforward to show that the left/right singular vectors then can be chosen as either odd or even functions.

      Consequentially, we actually sample from a reduced kernel $K^\mathrm{red}_\pm$ on $[0, 1] \times [0, 1]$ that is given as either

      \[ K^\mathrm{red}_\pm(x, y) = K(x, y) \pm K(x, -y),\]

      gaining a 4-fold speedup (because we take only a quarter of the domain) in constructing the SVE. The full singular functions can be reconstructed by (anti-)symmetrically continuing them to the negative axis. The reduced kernels. Compare their [0,1] × [0,1] subregions with the sampling point distribution plot above.

      Using the integration rules allows us to approximate

      \[\left. -\begin{aligned} + \int \dd{y} g(y) \approx \sum_j g(y_j) z_j.\]

      To get an idea regarding the distribution of these sampling points, refer to Fig. 2.2, which shows $\{x_i\} \times \{y_j\}$ for $\Lambda = 80$: Sampling point distribution resulting from a Cartesian product of Gauss integration rules.

      Note:

      The points do not cover $[-1, 1] \times [-1, 1]$ but only $[0, 1] \times [0, 1]$. This is actually a special case as we exploit the kernel's centrosymmetry, i.e. $K(x, y) = K(-x, -y)$. It is straightforward to show that the left/right singular vectors then can be chosen as either odd or even functions.

      Consequentially, we actually sample from a reduced kernel $K^\mathrm{red}_\pm$ on $[0, 1] \times [0, 1]$ that is given as either

      \[ K^\mathrm{red}_\pm(x, y) = K(x, y) \pm K(x, -y),\]

      gaining a 4-fold speedup (because we take only a quarter of the domain) in constructing the SVE. The full singular functions can be reconstructed by (anti-)symmetrically continuing them to the negative axis. Reduced kernels, as a function of x and y, parameterizing imaginary time and real frequency, respectively. Compare their [0,1] × [0,1] subregions with the sampling point distribution plot above.

      Using the integration rules allows us to approximate

      \[\begin{aligned} S_\ell U_\ell(x_i) &\approx \sum_j K(x_i, y_j) V_\ell(y_j) z_j &&\forall i \\ S_\ell V_\ell(y_j) &\approx \sum_i K(x_i, y_j) U_\ell(x_i) w_i &&\forall j -\end{aligned} -\right\}\]

      which we now multiply by $\sqrt{w_i}$ and $\sqrt{z_j}$ respectively to normalize our basis functions, yielding

      \[\left. -\begin{aligned} +\end{aligned}\]

      which we now multiply by $\sqrt{w_i}$ and $\sqrt{z_j}$ respectively to normalize our basis functions, yielding

      \[\begin{aligned} S_\ell \sqrt{w_i} U_\ell(x_i) &\approx \sum_j \sqrt{w_i} K(x_i, y_j) \sqrt{z_j} \sqrt{z_j} V_\ell(y_j) \\ S_\ell \sqrt{z_j} V_\ell(y_j) &\approx \sum_i \sqrt{w_i} K(x_i, y_j) \sqrt{z_j} \sqrt{w_i} U_\ell(x_i) -\end{aligned} -\right\}\]

      If we now define vectors $\vec u_\ell$, $\vec v_\ell$ and a matrix $K$ with entries $u_{\ell, i} \equiv \sqrt{w_i} U_\ell(x_i)$, $v_{\ell, j} \equiv \sqrt{z_j} V_\ell(y_j)$ and $K_{ij} \equiv \sqrt{w_i} K(x_i, y_j) \sqrt{z_j}$, then

      \[\left. -\begin{aligned} +\end{aligned}\]

      If we now define vectors $\vec u_\ell$, $\vec v_\ell$ and a matrix $K$ with entries $u_{\ell, i} \equiv \sqrt{w_i} U_\ell(x_i)$, $v_{\ell, j} \equiv \sqrt{z_j} V_\ell(y_j)$ and $K_{ij} \equiv \sqrt{w_i} K(x_i, y_j) \sqrt{z_j}$, we obtain

      \[\begin{aligned} S_\ell u_{\ell, i} &\approx \sum_j K_{ij} v_{\ell, j} \\ S_\ell v_{\ell, j} &\approx \sum_i K_{ij} u_{\ell, i} -\end{aligned} -\right\}\]

      or

      \[\left. -\begin{aligned} +\end{aligned}\]

      or

      \[\begin{aligned} S_\ell \vec u_\ell &\approx K^{\phantom{\mathrm{T}}} \vec v_\ell \\ S_\ell \vec v_\ell &\approx K^\mathrm{T} \vec u_\ell. -\end{aligned} -\right\}\]

      Together with the property $\vec u_\ell^\mathrm{T} \vec u_{\ell'} \approx \delta_{\ell\ell'} \approx \vec v_\ell^\mathrm{T} \vec v_{\ell'}$ we have successfully translated the original SVE problem into an SVD, because

      \[ K = \sum_\ell S_\ell \vec u_\ell \vec v_\ell^\mathrm{T}.\]

    3. The next step is calling the matrices function which computes the matrix $K$ derived in the previous step.

      Note

      The function is named in the plural because in the centrosymmetric case it actually returns two matrices $K_+$ and $K_-$, one for the even and one for the odd kernel. The SVDs of these matrices are later concatenated, so for simplicity, we will refer to $K$ from here on out.

      Info

      Special care is taken here to avoid FP-arithmetic cancellation around $x = -1$ and $x = +1$.

      Kernel matrices, rotated 90 degrees counterclockwise to make the connection with the (subregion [0,1] × [0,1] of the) previous figure more obvious. Thus we can see how the choice of sampling points has magnified and brought to the matrices' centers the regions of interest. Furthermore, elements with absolute values smaller than 10\\% of the maximum have been omitted to emphasize the structure; this should however not be taken to mean that there is any sparsity to speak of we could exploit in the next step.

    4. Take the truncated singular value decomposition (trSVD) of $K$, or rather, of $K_+$ and $K_-$. We use here a custom trSVD routine written by Markus Wallerberger which combines a homemade rank-revealing QR decomposition with GenericLinearAlgebra.svd!. This is necessary because there is currently no trSVD for quad precision types available.

    5. Via the function truncate, we throw away superfluous terms in our expansion. More specifically, we choose the basis size $L$ such that $S_\ell / S_0 > \varepsilon$ for all $\ell \leq L$. Here $\varepsilon$ is our selected precision, in our case it's equal to the double precision machine epsilon, $2^{-52} \approx 2.22 \times 10^{-16}$.

    6. Finally, we need a postprocessing step implemented in postprocess which performs some technical manipulation to turn the SVD result into the SVE we actually want. The functions are represented as piecewise Legendre polynomials, which model a function on the interval $[x_\mathrm{min}, x_\mathrm{max}]$ as a set of segments on the intervals $[a_i, a_{i+1}]$, where on each interval the function is expanded in scaled Legendre polynomials. The interval endpoints are chosen such that they reflect the approximate position of roots of a high-order singular function in $x$.

    The finishing touches

    The difficult part of constructing the FiniteTempBasis is now over. Next we truncate the left and right singular functions by discarding $U_\ell$ and $V_\ell$ with indices $\ell > L$ to match the $S_\ell$. The functions are now scaled to imaginary-time and frequency according to

    \[ \tau = \beta/2 (x + 1) \qand \omega = \omega_\mathrm{max} y\]

    and to match them, the singular values are multiplied by $\sqrt{(\beta/2)\omega}$, because $K(x,y) \sqrt{\dd x\dd y} = K(\tau,\omega) \sqrt{\dd\tau\dd\omega}$. We also add to our basis $\hat{U}_\ell(\mathrm{i}\omega)$, the Fourier transforms of the left singular functions, defined on the fermionic Matsubara frequencies $\mathrm{i}\omega = \mathrm{i}(2n+1)\beta/\pi$ (with integer $n$). This is particularly simple, because the Legendre polynomials' Fourier transforms are known analytically and given by spherical Bessel functions, for which we can rely on Bessels.jl [6].

    We can now take a look at our basis functions to get a feel for them:

    The first 6 left singular basis functions on the imaginary-time axis.

    The first 6 right singular basis functions on the frequency axis.

    Looking back at the image of the kernel $K(x,y)$ we can imagine how it is reconstructed by multiplying and summing (including a factor $S_\ell$) $U_\ell(\tau)$ and $V_\ell(\omega)$. An important property of the left singular functions is interlacing, i.e. $U_\ell$ interlaces $U_{\ell+1}$. A function $g$ with roots $\alpha_{n-1} \leq \ldots \leq \alpha_1$ interlaces a function $f$ with roots $\beta_n \leq \ldots \leq \beta_1$ if

    \[ \beta_n \leq \alpha_{n-1} \leq \beta_{n-1} \leq \ldots \leq \beta_1.\]

    We will use this property in constructing our sparse sampling set.

    The first 8 Fourier transformed basis functions on the Matsubara frequency axis.

    As for the Matsubara basis functions, we plot only the non-zero components, i.e. $\mathrm{Im}\;\hat U_\ell\,(\mathrm{i}\omega)$ with odd $\ell$ and $\mathrm{Re}\;\hat U_\ell\,(\mathrm{i}\omega)$ with even $\ell$.

    Constructing the samplers

    With our basis complete, we construct sparse sampling objects for fermionic propagators on the imaginary-time axis and on the Matsubara frequency axis.

    julia> sτ = TauSampling(basis);
    +\end{aligned}\]

    Together with the property $\vec u_\ell^\mathrm{T} \vec u_{\ell'} \approx \delta_{\ell\ell'} \approx \vec v_\ell^\mathrm{T} \vec v_{\ell'}$ we have successfully translated the original SVE problem into an SVD, because

    \[ K = \sum_\ell S_\ell \vec u_\ell \vec v_\ell^\mathrm{T}.\]

  3. The next step is calling the matrices function which computes the matrix $K$ derived in the previous step.

    Note

    The function is named in the plural because in the centrosymmetric case it actually returns two matrices $K_+$ and $K_-$, one for the even and one for the odd kernel. The SVDs of these matrices are later concatenated, so for simplicity, we will refer to $K$ from here on out.

    Info

    Special care is taken here to avoid FP-arithmetic cancellation around $x = -1$ and $x = +1$.

    Kernel matrices, rotated 90 degrees counterclockwise to make the connection with the (subregion [0,1] × [0,1] of the) previous figure more obvious. Thus we can see how the choice of sampling points has magnified and brought the regions of interest to the matrices' centers. Furthermore, elements with absolute values smaller than 10% of the maximum have been omitted to emphasize the structure; this should however not be taken to mean that there is any sparsity to speak of that we could exploit in the next step.

  4. Take the truncated singular value decomposition (trSVD) of $K$, or rather, of $K_+$ and $K_-$. We use here a custom trSVD routine written by Markus Wallerberger which combines a homemade rank-revealing QR decomposition with GenericLinearAlgebra.svd!. This is necessary because there is currently no trSVD for quad precision types available.

  5. Via the function truncate, we throw away superfluous terms in our expansion. More specifically, we choose the basis size $L$ such that $S_\ell / S_0 > \varepsilon$ for all $\ell \leq L$. Here $\varepsilon$ is our selected precision, in our case it's equal to the double precision machine epsilon, $2^{-52} \approx 2.22 \times 10^{-16}$.

  6. Finally, we need a postprocessing step implemented in postprocess which performs some technical manipulation to turn the SVD result into the SVE we actually want. The functions are represented as piecewise Legendre polynomials, which model a function on the interval $[x_\mathrm{min}, x_\mathrm{max}]$ as a set of segments on the intervals $[a_i, a_{i+1}]$, where on each interval the function is expanded in scaled Legendre polynomials. The interval endpoints are chosen such that they reflect the approximate position of roots of a high-order singular function in $x$.
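
The quadrature-to-SVD translation described in step 2 can be reproduced in a few self-contained lines. Note that this is only an illustration: it uses a crude global midpoint rule in double precision instead of the piecewise Gauss rules and extended working precision the library employs, so the result is merely a qualitative picture of the true SVE, while the discrete SVD identities below hold exactly:

using LinearAlgebra

Λ = 80.0
K(x, y) = exp(-Λ * y * (x + 1) / 2) / (exp(-Λ * y) + 1)

n = 400
x = [-1 + (i - 0.5) * 2 / n for i in 1:n]    # midpoint nodes on [-1, 1]
w = fill(2 / n, n)                           # midpoint weights
y, z = x, w                                  # same rule in the y direction

Kmat = [sqrt(w[i]) * K(x[i], y[j]) * sqrt(z[j]) for i in 1:n, j in 1:n]
F = svd(Kmat)

S = F.S                                      # approximate singular values S_ℓ
Ugrid = F.U ./ sqrt.(w)                      # column ℓ holds U_ℓ at the nodes x_i
Vgrid = F.V ./ sqrt.(z)                      # column ℓ holds V_ℓ at the nodes y_j

abs(sum(w .* Ugrid[:, 1] .* Ugrid[:, 2])) < 1e-12   # orthonormality ⟨U_1, U_2⟩ ≈ 0
Kmat * F.V[:, 1] ≈ S[1] * F.U[:, 1]                 # eigenvalue relation S_ℓ u_ℓ = K v_ℓ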

Finishing touches

The difficult part of constructing the FiniteTempBasis is now over. Next we truncate the left and right singular functions by discarding $U_\ell$ and $V_\ell$ with indices $\ell > L$ to match the $S_\ell$. The functions are now scaled to imaginary-time and frequency according to

\[ \tau = \beta/2 (x + 1) \qand \omega = \omega_\mathrm{max} y.\]

This means the singular values need to be multiplied by $\sqrt{(\beta/2)\omega_\mathrm{max}}$, because $K(x,y) \sqrt{\dd x\dd y} = K(\tau,\omega) \sqrt{\dd\tau\dd\omega}$. We also add to our basis $\hat{U}_\ell(\mathrm{i}\omega)$, the Fourier transforms of the left singular functions, defined on the fermionic Matsubara frequencies $\mathrm{i}\omega = \mathrm{i}(2n+1)\pi/\beta$ (with integer $n$). This is particularly simple, because the Legendre polynomials' Fourier transforms are known analytically and given by spherical Bessel functions, for which we can rely on Bessels.jl [6].
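
The underlying identity is $\int_{-1}^{+1} \dd{x} P_\ell(x) e^{\mathrm{i}\omega x} = 2 \mathrm{i}^\ell j_\ell(\omega)$, with $j_\ell$ a spherical Bessel function. Below is a small check for $\ell = 2$, assuming sphericalbesselj is the spherical Bessel function exported by Bessels.jl:

using Bessels

P₂(x) = (3x^2 - 1) / 2                       # Legendre polynomial of degree 2
ω = 4.2
n = 100_000
xs = range(-1 + 1 / n, 1 - 1 / n; length=n)  # midpoint grid on [-1, 1]
lhs = sum(@. P₂(xs) * exp(im * ω * xs)) * (2 / n)
rhs = 2 * im^2 * sphericalbesselj(2, ω)
abs(lhs - rhs) < 1e-8                        # true, up to quadrature error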

We can now take a look at our basis functions to get a feel for them:

First 6 left singular basis functions on the imaginary-time axis.

First 6 right singular basis functions on the frequency axis.

Looking back at the image of the kernel $K(x,y)$ we can imagine how it is reconstructed by multiplying and summing (including a factor $S_\ell$) $U_\ell(\tau)$ and $V_\ell(\omega)$. An important property of the left singular functions is interlacing, i.e. $U_\ell$ interlaces $U_{\ell+1}$. A function $g$ with roots $\alpha_{n-1} \leq \ldots \leq \alpha_1$ interlaces a function $f$ with roots $\beta_n \leq \ldots \leq \beta_1$ if

\[ \beta_n \leq \alpha_{n-1} \leq \beta_{n-1} \leq \ldots \leq \beta_1.\]

We will use this property for constructing our sparse sampling set.

First 8 Fourier transformed basis functions on the Matsubara frequency axis.

As for the Matsubara basis functions, we plot only the non-zero components, i.e. $\mathrm{Im}\;\hat U_\ell\,(\mathrm{i}\omega)$ with odd $\ell$ and $\mathrm{Re}\;\hat U_\ell\,(\mathrm{i}\omega)$ with even $\ell$.

Constructing the samplers

With our basis complete, we construct sparse sampling objects for fermionic propagators on the imaginary-time axis and on the Matsubara frequency axis.

julia> sτ = TauSampling(basis);
 
 julia> show(sampling_points(sτ))
 [0.018885255323127792, 0.10059312563754808, 0.25218900406693556, 0.4822117319309194, 0.8042299148252774, 1.2376463941125326, 1.8067997157763205, 2.535059399842931, 3.4296355795122793, 4.45886851573216, 5.541131484267839, 6.570364420487721, 7.464940600157068, 8.19320028422368, 8.762353605887466, 9.195770085174722, 9.51778826806908, 9.747810995933065, 9.899406874362452, 9.981114744676873]
@@ -91,7 +81,7 @@
 julia> siω = MatsubaraSampling(basis; positive_only=true);
 
 julia> show(sampling_points(siω))
-FermionicFreq[FermionicFreq(1), FermionicFreq(3), FermionicFreq(5), FermionicFreq(7), FermionicFreq(9), FermionicFreq(11), FermionicFreq(17), FermionicFreq(27), FermionicFreq(49), FermionicFreq(153)]

Both functions first determine a suitable set of sampling points on their respective axis. In the case of TauSampling, the sampling points $\{\tau_i\}$ are chosen as the extrema of the highest-order basis function in imaginary-time; this works because $U_\ell$ has exactly $\ell$ roots. This turns out to be close to optimal with respect to conditioning for this size (within a few percent). Similarly, MatsubaraSampling chooses sampling points $\{\mathrm{i}\omega_n\}$ as the (discrete) extrema of the highest-order basis function in Matsubara. By setting positive_only=true, one assumes that functions to be fitted are symmetric in Matsubara frequency, i.e.

\[ \hat G(\mathrm{i}\omega) = \qty(\hat G(-\mathrm{i}\omega))^*.\]

In this case, sparse sampling is performed over non-negative frequencies only, cutting away half of the necessary sampling space, so we get only 10 sampling points instead of the 20 in the imaginary-time case.

Then, both compute design matrices by $E^\tau_{i\ell} = u_\ell(\tau_i)$ and $E^\omega_{n\ell} = \hat{u}_\ell(i\omega_n)$ as well as their SVDs. We are now able to get the IR basis coefficients of a function that is known on the imaginary-time sampling points by solving the fitting problem

\[ G_\ell = \mathrm{arg\,min}_{G_\ell} \sum_{\{\tau_i\}} \norm{G(\tau_i) - \sum_\ell E^\tau_{i\ell} G_\ell}^2,\]

which can be done efficiently once the SVD is known. The same can be done on the Matsubara axis

\[ G_\ell = \mathrm{arg\,min}_{G_\ell} \sum_{\{\mathrm{i}\omega_n\}} \norm{\hat{G}(\mathrm{i}\omega_n) - \sum_\ell E^\omega_{n\ell} G_\ell}^2\]

and taken together we now have a way of moving efficiently between both. In solving these problems, we need to take their conditioning into consideration; in the case of the Matsubara axis, the problem is somewhat worse conditioned than on the imaginary-time axis due to its discrete nature. We augment it therefore with 4 additional sampling frequencies.

Scaling behavior of the fitting problems' conditioning.

Initializing the iteration

Because the non-interacting density of states is given $\rho_0(\omega) = \frac{2}{\pi}\sqrt{1 - \omega^2}$, we can easily get the IR basis coefficients for the non-interacting propagator

\[ {G_0}_\ell = -S_\ell {\rho_0}_\ell = -S_\ell \int \dd{\omega} V_\ell(\omega) \rho_0(\omega)\]

by utilizing the overlap function, which implements integration.

julia> U = 1.2
+FermionicFreq[FermionicFreq(1), FermionicFreq(3), FermionicFreq(5), FermionicFreq(7), FermionicFreq(9), FermionicFreq(11), FermionicFreq(17), FermionicFreq(27), FermionicFreq(49), FermionicFreq(153)]

Both functions first determine a suitable set of sampling points on their respective axis. In the case of TauSampling, the sampling points $\{\tau_i\}$ are chosen as the extrema of the highest-order basis function in imaginary-time; this works because $U_\ell$ has exactly $\ell$ roots. This turns out to be close to optimal with respect to conditioning for this size (within a few percent). Similarly, MatsubaraSampling chooses sampling points $\{\mathrm{i}\omega_n\}$ as the (discrete) extrema of the highest-order basis function in Matsubara. By setting positive_only=true, one assumes that functions to be fitted are symmetric in Matsubara frequency, i.e.

\[ \hat G(\mathrm{i}\omega) = \qty(\hat G(-\mathrm{i}\omega))^*.\]

In this case, sparse sampling is performed over non-negative frequencies only, cutting away half of the necessary sampling space, so we get only 10 sampling points instead of the 20 in the imaginary-time case.
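
Counting the sampling points listed above makes the saving explicit:

julia> length(sampling_points(sτ)), length(sampling_points(siω))
(20, 10)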

Then, both compute design matrices by $E^\tau_{i\ell} = u_\ell(\tau_i)$ and $E^\omega_{n\ell} = \hat{u}_\ell(i\omega_n)$ as well as their SVDs. We are now able to get the IR basis coefficients of a function that is known on the imaginary-time sampling points by solving the fitting problem

\[ G_\ell = \mathrm{arg\,min}_{G_\ell} \sum_{\{\tau_i\}} \norm{G(\tau_i) - \sum_\ell E^\tau_{i\ell} G_\ell}^2,\]

which can be done efficiently once the SVD is known. The same can be done on the Matsubara axis

\[ G_\ell = \mathrm{arg\,min}_{G_\ell} \sum_{\{\mathrm{i}\omega_n\}} \norm{\hat{G}(\mathrm{i}\omega_n) - \sum_\ell E^\omega_{n\ell} G_\ell}^2\]

and taken together we now have a way of moving efficiently between both. In solving these problems, we need to take their conditioning into consideration; in the case of the Matsubara axis, the problem is somewhat worse conditioned than on the imaginary-time axis due to its discrete nature. We augment it therefore with 4 additional sampling frequencies.
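
The τ-axis fitting problem can also be written out by hand. The sketch below assumes that basis.u can be called at a point to return the vector of left singular functions (sparse-ir style); the library's fit(sτ, Gτ) performs the corresponding solve via the precomputed SVD of the design matrix:

using LinearAlgebra

τs = sampling_points(sτ)
Eτ = permutedims(hcat((basis.u(τᵢ) for τᵢ in τs)...))  # design matrix E^τ_{iℓ} = U_ℓ(τ_i)
cond(Eτ)                                               # modest, by construction of {τ_i}

c  = randn(length(τs))                                 # arbitrary test coefficients
Gτ = Eτ * c                                            # values on the sampling points
maximum(abs, (Eτ \ Gτ) - c)                            # the least-squares fit recovers them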

Scaling behavior of the fitting problem conditioning.

Initializing the iteration

Because the non-interacting density of states is given $\rho_0(\omega) = \frac{2}{\pi}\sqrt{1 - \omega^2}$, we can easily get the IR basis coefficients for the non-interacting propagator

\[ {G_0}_\ell = -S_\ell {\rho_0}_\ell = -S_\ell \int \dd{\omega} V_\ell(\omega) \rho_0(\omega)\]

by utilizing the overlap function, which implements integration.

julia> U = 1.2
 1.2
 
 julia> ρ₀(ω) = 2/π * √(1 - clamp(ω, -1, +1)^2)
@@ -145,7 +135,7 @@
  1.6747120525708993e-16 - 0.8633270688082162im
                         ⋮
   1.627612150170272e-17 - 0.06489281188294724im
-  6.134766817544449e-19 - 0.020802317001514643im

Self-consistency loop

We are now ready to tackle the coupled equations from the start, and will state them here for the reader's convenience:

\[ \Sigma(\tau) = U^2 \pqty{G(\tau)}^3\]

and the Dyson equation

\[ \hat G(\mathrm{i}\omega) = \pqty{\pqty{\hat G_0(\mathrm{i}\omega)}^{-1} - \hat\Sigma(\mathrm{i}\omega)}^{-1}.\]

The first one is diagonal in $\tau$ and the second is diagonal in $\mathrm{i}\omega$, so we employ the IR basis to efficiently convert between the two bases. Starting with our approximation to $G_\ell$ we evaluate in the $\tau$-basis to get $G(\tau)$, from which we can compute the self-energy on the sampling points $\Sigma(\tau)$ according to the first equation. This can now be fitted to the $\tau$-basis to get $\Sigma_\ell$, and from there $\hat\Sigma(\mathrm{i}\omega)$ via evaluation in the $\mathrm{i}\omega$-basis. Now the Dyson equation is used to get $\hat G(\mathrm{i}\omega)$ on the sampling frequencies, which is then fitted to the $\mathrm{i}\omega$-basis yielding $G_\ell$ and completing the loop. This is now performed until convergence.

julia> while !isapprox(Gl, Gl_prev, rtol=ε)
+  6.134766817544449e-19 - 0.020802317001514643im

Self-consistency loop

We are now ready to tackle the coupled equations from the start, and will restate them here for the reader's convenience:

\[ \Sigma(\tau) = U^2 \pqty{G(\tau)}^3\]

and the Dyson equation

\[ \hat G(\mathrm{i}\omega) = \pqty{\pqty{\hat G_0(\mathrm{i}\omega)}^{-1} - \hat\Sigma(\mathrm{i}\omega)}^{-1}.\]

The first one is diagonal in $\tau$ and the second is diagonal in $\mathrm{i}\omega$, so we employ the IR basis to efficiently convert between the two bases. Starting with our approximation to $G_\ell$ we evaluate in the $\tau$-basis to get $G(\tau)$, from which we can compute the self-energy on the sampling points $\Sigma(\tau)$ according to the first equation. This can now be fitted to the $\tau$-basis to get $\Sigma_\ell$, and from there $\hat\Sigma(\mathrm{i}\omega)$ via evaluation in the $\mathrm{i}\omega$-basis. Now the Dyson equation is used to get $\hat G(\mathrm{i}\omega)$ on the sampling frequencies, which is then fitted to the $\mathrm{i}\omega$-basis yielding $G_\ell$ and completing the loop. This is now performed until convergence.

julia> while !isapprox(Gl, Gl_prev, rtol=ε)
            Gl_prev = copy(Gl)
            Gτ = evaluate(sτ, Gl)
            Στ = @. U^2 * Gτ^3
@@ -161,7 +151,7 @@
     \hat\Sigma(\mathrm{i}\omega_n) &= \sum_\ell \hat U_\ell(\mathrm{i}\omega_n) \Sigma_\ell \\
     \hat G(\mathrm{i}\omega_n) &= \pqty{\pqty{\hat G_0(\mathrm{i}\omega_n)}^{-1} - \hat\Sigma(\mathrm{i}\omega_n)}^{-1} \\
     G_\ell &= \mathrm{arg\,min}_{G_\ell} \sum_{\{\mathrm{i}\omega_n\}} \norm{\hat G(\mathrm{i}\omega_n) - \sum_\ell \hat U_\ell(\mathrm{i}\omega_n) G_\ell}^2
-\end{aligned}\]

We consider the iteration converged when the difference between subsequent iterations does not exceed the basis accuracy, i.e. when

\[ \norm{G_\ell - G^\mathrm{prev}_\ell} \leq \varepsilon \max\Bqty{\norm{G_\ell}, \norm{G^\mathrm{prev}_\ell}},\]

where the norm is $\norm{G_\ell}^2 = \sum_{\ell=1}^L G_\ell^2$.

The entire script, as presented in Appendix: Optimized script, takes around 60ms to run and allocates roughly 19MiB on a laptop CPU from 2019.

Visualizing the solution

To plot our solution for the self-energy, we create a MatsubaraSampling object on a dense box of sampling frequencies. In this case, we only need it for expanding, i.e. multiplying a vector, hence there is no need for constructing the SVD, so we pass factorize=false.

julia> box = FermionicFreq.(1:2:79)
+\end{aligned}\]

We consider the iteration converged when the difference between subsequent iterations does not exceed the basis accuracy, i.e. when

\[ \norm{G_\ell - G^\mathrm{prev}_\ell} \leq \varepsilon \max\Bqty{\norm{G_\ell}, \norm{G^\mathrm{prev}_\ell}},\]

where the norm is $\norm{G_\ell}^2 = \sum_{\ell=1}^L \abs{G_\ell}^2$.
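
This is precisely the criterion the isapprox call in the loop implements: for arrays, Julia's isapprox with a relative tolerance and no absolute tolerance compares norm(Gl - Gl_prev) against rtol * max(norm(Gl), norm(Gl_prev)).

using LinearAlgebra: norm

isapprox(Gl, Gl_prev, rtol=ε) == (norm(Gl - Gl_prev) ≤ ε * max(norm(Gl), norm(Gl_prev)))  # true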

The entire script, as presented in Appendix: Optimized script, takes around 60ms to run on a laptop CPU from 2019 (Intel Core i7-9750H) and allocates roughly 19MB in the process.

Visualizing the solution

To plot our solution for the self-energy, we create a MatsubaraSampling object on a dense box of sampling frequencies. In this case, we only need it for expanding, i.e. multiplying a vector, hence there is no need for constructing the SVD, so we pass factorize=false.

julia> box = FermionicFreq.(1:2:79)
 40-element Vector{FermionicFreq}:
   π/β
   3π/β
@@ -177,7 +167,7 @@
  2.0279596075077236e-17 - 0.1225916020773678im
                         ⋮
  -6.624594477591435e-17 - 0.014786512975659354im
-  -7.08391512971528e-17 - 0.01441676347590391im

We are now in a position to visualize the results of our calculation:

  • In the main plot, the imaginary part of the self-energy in Matsubara alongside the sampling points on which it was computed. This illustrates very nicely one of the main advantages of our method: During the entire course of the iteration we only ever need to store and calculate with the values of all functions on the sparse set of sampling points and are still able to expand the result the a dense frequency set in the end.
  • In the inset, the IR basis coefficients of the self-energy and of the propagator, along with the basis singular values. We only plot the non-vanishing basis coefficients, which are those at odd values of $\ell$ because the real parts of $\hat G(\mathrm{i}\omega)$ and $\hat \Sigma(\mathrm{i}\omega)$ are almost zero. The singular values $S_\ell/S_1$ are the bound for $\abs{G_l / G_1}$ and $\abs{\Sigma_\ell / \Sigma_1}$.

Self-energy calculated in the self-consistency iteration. The inset shows the IR basis coefficients corresponding to the self-energy and the propagator.

Summary and outlook

We introduced SparseIR.jl, a full featured implementation of the intermediate representation in the Julia programming language. By means of a worked example, we explained in detail how to use it and the way it works internally. In this example, we solved an Anderson impurity model with elliptical density of states to second order via a self-consistent loop. We successfully obtained the self-energy (accurate to second order) with minimal computational effort.

Regarding further work, perhaps the single most obvious direction is the extension to multi-particle quantities; And indeed, [7, 8] did exactly this, with Markus Wallerberger writing the as of yet unpublished Julia library OvercompleteIR.jl which builds on top of SparseIR.jl. This library has already found applications in solving the parquet equations for the Hubbard model and for the Anderson impurity model [9].

References

[1]
H. Shinaoka, J. Otsuki, M. Ohzeki and K. Yoshimi. Compressing Green's function using intermediate representation between imaginary-time and real-frequency domains. Physical Review B 96, 35147 (2017).
[2]
J. Li, M. Wallerberger, N. Chikano, C.-N. Yeh, E. Gull and H. Shinaoka. Sparse sampling approach to efficient ab initio calculations at finite temperature. Physical Review B 101, 035144 (2020).
[3]
H. Shinaoka, N. Chikano, E. Gull, J. Li, T. Nomoto, J. Otsuki, M. Wallerberger, T. Wang and K. Yoshimi. Efficient ab initio many-body calculations based on sparse modeling of Matsubara Green’s function. SciPost Physics Lecture Notes (2022-09).
[4]
M. Wallerberger, S. Badr, S. Hoshino, S. Huber, F. Kakizawa, T. Koretsune, Y. Nagai, K. Nogaki, T. Nomoto, H. Mori, J. Otsuki, S. Ozaki, T. Plaikner, R. Sakurai, C. Vogel, N. Witt, K. Yoshimi and H. Shinaoka, sparse-ir: Optimal compression and sparse sampling of many-body propagators. SoftwareX 21, 101266 (2023-02).
[5]
[6]
M. Helton and O. Smith. Bessels.jl (2022).
[7]
H. Shinaoka, J. Otsuki, K. Haule, M. Wallerberger, E. Gull, K. Yoshimi and M. Ohzeki. Overcomplete compact representation of two-particle Green's functions. Physical Review B 97, 205111 (2018-05).
[8]
M. Wallerberger, H. Shinaoka and A. Kauch. Solving the Bethe-Salpeter equation with exponential convergence. Physical Review Research 3, 033168 (2021-08).
[9]
M. Michalek. Solving the Anderson impurity model with intermediate representation of the parquet equations. Master's thesis.

Appendix: Optimized script

With minimal modifications we can transform our code to be more optimized for performance:

  • Put script in a function. This is because globals are type instable in Julia.
  • Add ::Vector{Float64} annotation to ensure type inferrability of ρ₀l.
  • Gl in the loop will be a Vector{ComplexF64} in the loop, so make it complex right away for type stability.
  • Preallocate and reuse arrays to remove allocations in the loop, minimizing total allocations and time spent garbage collecting. Here we benefit from SparseIR.jl providing in-place variants fit! and evaluate!.
using SparseIR
+  -7.08391512971528e-17 - 0.01441676347590391im

We are now in a position to visualize the results of our calculation in Fig. 2.9:

  • In the main plot, the imaginary part of the self-energy in Matsubara alongside the sampling points on which it was computed. This illustrates very nicely one of the main advantages of our method: During the entire course of the iteration we only ever need to store and calculate the values of all functions on the sparse set of sampling points and are still able to expand the result on a dense frequency set in the end.
  • In the inset, the IR basis coefficients of the self-energy and of the propagator are shown, along with the basis singular values. We only plot the non-vanishing basis coefficients, which are those at odd values of $\ell$ because the real parts of $\hat G(\mathrm{i}\omega)$ and $\hat \Sigma(\mathrm{i}\omega)$ are almost zero. The singular values $S_\ell/S_1$ are the bound for $\abs{G_l / G_1}$ and $\abs{\Sigma_\ell / \Sigma_1}$.

Self-energy calculated in the self-consistency iteration. The inset shows the IR basis coefficients corresponding to the self-energy and the propagator.

Summary and outlook

We introduced SparseIR.jl, a full-featured implementation of the intermediate representation in the Julia programming language. By means of a simple example, we explained in detail how to use it and the way it works internally. In this example, we solved an Anderson impurity model with a semi-elliptic density of states within second-order perturbation theory in the interaction via a self-consistent loop. We successfully obtained the self-energy (accurate to second order) with minimal computational effort.

Regarding further work, perhaps the single most obvious direction is the extension to multi-particle quantities; and indeed, Refs. [7, 8] did exactly this, with Markus Wallerberger writing the as-yet unpublished Julia library OvercompleteIR.jl, which builds upon SparseIR.jl. So, as a transitive dependency, the library of the present thesis has already found applications in solving the parquet equations for the Hubbard model and for the Anderson impurity model [9].

References

[1]
H. Shinaoka, J. Otsuki, M. Ohzeki and K. Yoshimi. Compressing Green's function using intermediate representation between imaginary-time and real-frequency domains. Physical Review B 96, 035147 (2017).
[2]
J. Li, M. Wallerberger, N. Chikano, C.-N. Yeh, E. Gull and H. Shinaoka. Sparse sampling approach to efficient ab initio calculations at finite temperature. Physical Review B 101, 035144 (2020).
[3]
H. Shinaoka, N. Chikano, E. Gull, J. Li, T. Nomoto, J. Otsuki, M. Wallerberger, T. Wang and K. Yoshimi. Efficient ab initio many-body calculations based on sparse modeling of Matsubara Green's function. SciPost Phys. Lect. Notes, 63 (2022).
[4]
M. Wallerberger, S. Badr, S. Hoshino, S. Huber, F. Kakizawa, T. Koretsune, Y. Nagai, K. Nogaki, T. Nomoto, H. Mori, J. Otsuki, S. Ozaki, T. Plaikner, R. Sakurai, C. Vogel, N. Witt, K. Yoshimi and H. Shinaoka, sparse-ir: Optimal compression and sparse sampling of many-body propagators. SoftwareX 21, 101266 (2023-02).
[5]
[6]
M. Helton and O. Smith. Bessels.jl (2022).
[7]
H. Shinaoka, J. Otsuki, K. Haule, M. Wallerberger, E. Gull, K. Yoshimi and M. Ohzeki. Overcomplete compact representation of two-particle Green's functions. Physical Review B 97, 205111 (2018).
[8]
M. Wallerberger, H. Shinaoka and A. Kauch. Solving the Bethe-Salpeter equation with exponential convergence. Physical Review Research 3, 033168 (2021).
[9]
M. Michalek. Solving the Anderson impurity model with intermediate representation of the parquet equations (2024).

Appendix: Optimized script

With minimal modifications, we can optimize our code for performance:

  • Put the script in a function, because global variables are type-unstable in Julia.
  • Add a ::Vector{Float64} annotation to ensure type inferrability of ρ₀l.
  • Gl will be a Vector{ComplexF64} inside the loop, so make it complex from the start for type stability.
  • Preallocate and reuse arrays to remove allocations in the loop, minimizing total allocations and time spent in garbage collection. Here we benefit from SparseIR.jl providing the in-place variants fit! and evaluate!.
using SparseIR

function main(; β=10.0, ωmax=8.0, ε=1e-6)
    # Construct the IR basis and sparse sampling for fermionic propagators
    # ... (the unchanged middle portion of the script is not shown in this hunk) ...
        fit!(Gl, siω, Giω)
    end
    return basis, Σl
end
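A hedged usage sketch of the optimized script (the keyword values simply restate the defaults from the signature above):

# Hypothetical call site: run the optimized script and keep the returned
# basis and self-energy coefficients for further post-processing.
basis, Σl = main(; β=10.0, ωmax=8.0, ε=1e-6)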

Private names index

These are not considered API and therefore not covered by any semver promises.

Core.IntMethod

Get prefactor n for the Matsubara frequency ω = n*π/β

source
Core.UnionMethod
(polyFT::PiecewiseLegendreFT)(ω)

Obtain Fourier transform of polynomial for given MatsubaraFreq ω.

source
SparseIR.AbstractAugmentationType
AbstractAugmentation

Scalar function in imaginary time/frequency.

This represents a single function in imaginary time and frequency, together with some auxiliary methods that make it suitable for augmenting a basis.

See also: AugmentedBasis

source
SparseIR.AbstractBasisType
AbstractBasis

Abstract base class for bases on the imaginary-time axis.

Let basis be an abstract basis. Then we can expand a two-point propagator G(τ), where τ is imaginary time, into a set of basis functions:

G(τ) == sum(basis.u[l](τ) * g[l] for l in 1:length(basis)) + ϵ(τ),

where basis.u[l] is the l-th basis function, g[l] is the associated expansion coefficient and ϵ(τ) is an error term. Similarly, the Fourier transform Ĝ(n), where n is now a Matsubara frequency, can be expanded as follows:

Ĝ(n) == sum(basis.uhat[l](n) * g[l] for l in 1:length(basis)) + ϵ(n),

where basis.uhat[l] is now the Fourier transform of the basis function.

source
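As a small illustration of the expansion above, the following sketch, assuming some basis and a coefficient vector g of matching length are at hand, reconstructs the propagator at a given imaginary time:

# Sketch only: `basis` and `g` are assumed to exist; τ is a point in [0, β].
G(τ) = sum(basis.u[l](τ) * g[l] for l in 1:length(basis))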
SparseIR.AbstractKernelType
(kernel::AbstractKernel)(x, y[, x₊, x₋])

Evaluate kernel at point (x, y).

The parameters x₊ and x₋, if given, shall contain the values of x - xₘᵢₙ and xₘₐₓ - x, respectively. This is useful if either difference is to be formed and cancellation expected.

source
SparseIR.AbstractKernelType
AbstractKernel

Integral kernel K(x, y).

Abstract base type for an integral kernel, i.e. an AbstractFloat binary function $K(x, y)$ used in a Fredholm integral equation of the first kind:

\[ u(x) = ∫ K(x, y) v(y) dy\]

where $x ∈ [x_\mathrm{min}, x_\mathrm{max}]$ and $y ∈ [y_\mathrm{min}, y_\mathrm{max}]$. For its SVE to exist, the kernel must be square-integrable; for its singular values to decay exponentially, it must be smooth.

In general, the kernel is applied to a scaled spectral function $ρ'(y)$ as:

\[ ∫ K(x, y) ρ'(y) dy,\]

where $ρ'(y) = w(y) ρ(y)$.

source
SparseIR.AbstractSamplingType
AbstractSampling

Abstract type for sparse sampling.

Encodes the "basis transformation" of a propagator from the truncated IR basis coefficients G_ir[l] to time/frequency sampled on sparse points G(x[i]) together with its inverse, a least squares fit:

     ________________                   ___________________
     |                |    evaluate     |                   |
     |     Basis      |---------------->|     Value on      |
     |  coefficients  |<----------------|  sampling points  |
     |________________|      fit        |___________________|
source
SparseIR.CentrosymmSVEType
CentrosymmSVE <: AbstractSVE

SVE of centrosymmetric kernel in block-diagonal (even/odd) basis.

For a centrosymmetric kernel K, i.e., a kernel satisfying: K(x, y) == K(-x, -y), one can make the following ansatz for the singular functions:

u[l](x) = ured[l](x) + sign[l] * ured[l](-x)
v[l](y) = vred[l](y) + sign[l] * vred[l](-y)

where sign[l] is either +1 or -1. This means that the singular value expansion can be block-diagonalized into an even and an odd part by (anti-)symmetrizing the kernel:

K_even = K(x, y) + K(x, -y)
K_odd  = K(x, y) - K(x, -y)

The lth basis function, restricted to the positive interval, is then the singular function of one of these kernels. If the kernel generates a Chebyshev system [1], then even and odd basis functions alternate.

[1]: S. Karlin, Total Positivity (1968).

source
SparseIR.LogisticKernelOddType
LogisticKernelOdd <: AbstractReducedKernel

Fermionic analytical continuation kernel, odd.

In dimensionless variables $x = 2τ/β - 1$, $y = βω/Λ$, the fermionic integral kernel is a function on $[-1, 1] × [-1, 1]$:

\[ K(x, y) = -\frac{\sinh(Λ x y / 2)}{\cosh(Λ y / 2)}\]

source
SparseIR.PiecewiseLegendreFTType
PiecewiseLegendreFT <: Function

Fourier transform of a piecewise Legendre polynomial.

For a given frequency index n, the Fourier transform of the Legendre function is defined as:

    p̂(n) == ∫ dx exp(im * π * n * x / (xmax - xmin)) p(x)

The polynomial is continued either periodically (freq=:even), in which case n must be even, or antiperiodically (freq=:odd), in which case n must be odd.

source
SparseIR.PiecewiseLegendrePolyType
PiecewiseLegendrePoly <: Function

Piecewise Legendre polynomial.

Models a function on the interval $[xmin, xmax]$ as a set of segments on the intervals $S[i] = [a[i], a[i+1]]$, where on each interval the function is expanded in scaled Legendre polynomials.

source
SparseIR.PowerModelType
PowerModel

Model from a high-frequency series expansion:

A(iω) == sum(A[n] / (iω)^(n+1) for n in 1:N)

where $iω == i * π/2 * wn$ is a reduced imaginary frequency, i.e., $wn$ is an odd/even number for fermionic/bosonic frequencies.

source
SparseIR.ReducedKernelType
ReducedKernel

Restriction of centrosymmetric kernel to positive interval.

For a kernel $K$ on $[-1, 1] × [-1, 1]$ that is centrosymmetric, i.e. $K(x, y) = K(-x, -y)$, it is straightforward to show that the left/right singular vectors can be chosen as either odd or even functions.

Consequently, they are singular functions of a reduced kernel $K_\mathrm{red}$ on $[0, 1] × [0, 1]$ that is given as either:

\[ K_\mathrm{red}(x, y) = K(x, y) \pm K(x, -y)\]

This kernel is what this type represents. The full singular functions can be reconstructed by (anti-)symmetrically continuing them to the negative axis.

source
SparseIR.RegularizedBoseKernelOddType
RegularizedBoseKernelOdd <: AbstractReducedKernel

Bosonic analytical continuation kernel, odd.

In dimensionless variables $x = 2 τ / β - 1$, $y = β ω / Λ$, the bosonic integral kernel is a function on $[-1, 1] × [-1, 1]$:

\[ K(x, y) = -y \frac{\sinh(Λ x y / 2)}{\sinh(Λ y / 2)}\]

source
SparseIR.RuleType
Rule{T<:AbstractFloat}

Quadrature rule.

Approximation of an integral over [a, b] by a sum over discrete points x with weights w:

\[ ∫ f(x) ω(x) dx ≈ ∑_i f(x_i) w_i\]

where we generally have superexponential convergence for smooth $f(x)$ in the number of quadrature points.

source
SparseIR.SVEResultMethod
SVEResult(kernel::AbstractKernel;
     Twork=nothing, ε=nothing, lmax=typemax(Int),
     n_gauss=nothing, svd_strat=:auto,
     sve_strat=iscentrosymmetric(kernel) ? CentrosymmSVE : SamplingSVE
)

Perform truncated singular value expansion of a kernel.

Perform a truncated singular value expansion (SVE) of an integral kernel kernel : [xmin, xmax] x [ymin, ymax] -> ℝ:

kernel(x, y) == sum(s[l] * u[l](x) * v[l](y) for l in (1, 2, 3, ...)),

where s[l] are the singular values, which are ordered in non-increasing fashion, u[l](x) are the left singular functions, which form an orthonormal system on [xmin, xmax], and v[l](y) are the right singular functions, which form an orthonormal system on [ymin, ymax].

The SVE is mapped onto the singular value decomposition (SVD) of a matrix by expanding the kernel in piecewise Legendre polynomials (by default by using a collocation).

Arguments

  • K::AbstractKernel: Integral kernel to take SVE from.

  • ε::Real: Accuracy target for the basis: attempt to have singular values down to a relative magnitude of ε, and have each singular value and singular vector be accurate to ε. A Twork with a machine epsilon of ε^2 or lower is required to satisfy this. Defaults to 2.2e-16 if xprec is available, and 1.5e-8 otherwise.

  • cutoff::Real: Relative cutoff for the singular values. A Twork with machine epsilon of cutoff is required to satisfy this. Defaults to a small multiple of the machine epsilon.

    Note that cutoff and ε serve distinct purposes. cutoff represents the accuracy to which the kernel is reproduced, whereas ε is the accuracy to which the singular values and vectors are guaranteed.

  • lmax::Integer: Maximum basis size. If given, only at most the lmax most significant singular values and associated singular functions are returned.

  • n_gauss::Integer: Order of Legendre polynomials. Defaults to a value suggested by the kernel's SVE hints.

  • Twork: Working data type. Defaults to a data type with machine epsilon of at most ε^2 and at most cutoff, or otherwise the most accurate data type available.

  • sve_strat::AbstractSVE: SVE to SVD translation strategy. Defaults to SamplingSVE, optionally wrapped inside of a CentrosymmSVE if the kernel is centrosymmetric.

  • svd_strat ('fast' or 'default' or 'accurate'): SVD solver. Defaults to fast (ID/RRQR) based solution when accuracy goals are moderate, and more accurate Jacobi-based algorithm otherwise.

Returns: An SVEResult containing the truncated singular value expansion.

source
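A minimal usage sketch, assuming a logistic kernel with cutoff Λ = 42 and an accuracy target chosen purely for illustration:

# Truncated SVE of a logistic kernel; both numbers are illustrative.
sve = SVEResult(LogisticKernel(42.0); ε=1e-8)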
SparseIR.SamplingSVEType
SamplingSVE <: AbstractSVE

SVE to SVD translation by sampling technique [1].

Maps the singular value expansion (SVE) of a kernel kernel onto the singular value decomposition of a matrix A. This is achieved by choosing two sets of Gauss quadrature rules: (x, wx) and (y, wy) and approximating the integrals in the SVE equations by finite sums. This implies that the singular values of the SVE are well-approximated by the singular values of the following matrix:

A[i, j] = √(wx[i]) * K(x[i], y[j]) * √(wy[j])

and the values of the singular functions at the Gauss sampling points can be reconstructed from the singular vectors u and v as follows:

u[l,i] ≈ √(wx[i]) u[l](x[i])
v[l,j] ≈ √(wy[j]) v[l](y[j])

[1] P. Hansen, Discrete Inverse Problems, Ch. 3.1

source
SparseIR.accuracyFunction
accuracy(basis::AbstractBasis)

Accuracy of the basis.

Upper bound on the relative error of representing a propagator with the given number of basis functions (a number between 0 and 1).

source
SparseIR.canonicalize!Method
canonicalize!(u, v)

Canonicalize basis.

Each SVD (u[l], v[l]) pair is unique only up to a global phase, which may differ from implementation to implementation and also platform. We fix that gauge by demanding u[l](1) > 0. This ensures a diffeomorphic connection to the Legendre polynomials as Λ → 0.

source
SparseIR.conv_radiusFunction
conv_radius(kernel)

Convergence radius of the Matsubara basis asymptotic model.

For improved relative numerical accuracy, the IR basis functions on the Matsubara axis uhat(basis, n) can be evaluated from an asymptotic expression for abs(n) > conv_radius. If isinf(conv_radius), then the asymptotics are unused (the default).

source
SparseIR.default_matsubara_sampling_pointsFunction
default_matsubara_sampling_points(basis::AbstractBasis; positive_only=false)

Default sampling points on the imaginary frequency axis.

Arguments

  • positive_only::Bool: Only return non-negative frequencies. This is useful if the object to be fitted is symmetric in Matsubara frequency, ĝ(ω) == conj(ĝ(-ω)), or, equivalently, real in imaginary time.
source
SparseIR.eval_matrixFunction
eval_matrix(T, basis, x)

Return evaluation matrix from coefficients to sampling points. T <: AbstractSampling.

source
SparseIR.find_extremaMethod
find_extrema(polyFT::PiecewiseLegendreFT; part=nothing, grid=DEFAULT_GRID)

Obtain extrema of Fourier-transformed polynomial.

source
SparseIR.finite_temp_basesFunction
finite_temp_bases(β::Real, ωmax::Real, ε=nothing;
                  kernel=LogisticKernel(β * ωmax), sve_result=SVEResult(kernel; ε))

Construct FiniteTempBasis objects for fermion and bosons using the same LogisticKernel instance.

source
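A minimal usage sketch; that the two bases are returned as a (fermionic, bosonic) pair is an assumption made here for illustration:

# Fermionic and bosonic bases sharing a single LogisticKernel and SVE result.
basis_f, basis_b = finite_temp_bases(10.0, 8.0, 1e-6)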
SparseIR.from_IRFunction
from_IR(dlr::DiscreteLehmannRepresentation, gl::AbstractArray, dims=1)

From IR to DLR. gl: Expansion coefficients in IR.

source
SparseIR.get_symmetrizedMethod
get_symmetrized(kernel, sign)

Construct a symmetrized version of kernel, i.e. kernel(x, y) + sign * kernel(x, -y).

Beware!

By default, this returns a simple wrapper over the current instance which naively performs the sum. You may want to override this to avoid cancellation.

source
SparseIR.get_tnlMethod
get_tnl(l, w)

Fourier integral of the l-th Legendre polynomial:

Tₗ(ω) == ∫ dx exp(iωx) Pₗ(x)
source
SparseIR.giwMethod
giw(polyFT, wn)

Return model Green's function for reduced frequencies

source
SparseIR.iscentrosymmetricFunction
iscentrosymmetric(kernel)

Return true if kernel(x, y) == kernel(-x, -y) for all values of x and y in range. This allows the kernel to be block-diagonalized, speeding up the singular value expansion by a factor of 4. Defaults to false.

source
SparseIR.matop!Method
matop!(buffer, mat, arr::AbstractArray, op, dim)

Apply the operator op to the matrix mat and to the array arr along the first dimension (dim=1) or the last dimension (dim=N).

source
SparseIR.matop_along_dim!Method
matop_along_dim!(buffer, mat, arr::AbstractArray, dim::Integer, op)

Apply the operator op to the matrix mat and to the array arr along the dimension dim, writing the result to buffer.

source
SparseIR.movedimMethod
movedim(arr::AbstractArray, src => dst)

Move arr's dimension at src to dst while keeping the order of the remaining dimensions unchanged.

source
SparseIR.nsvalsMethod
nsvals(hints)

Upper bound for number of singular values.

Upper bound on the number of singular values above the given threshold, i.e. where s[l] ≥ ε * first(s).

source
SparseIR.phase_stableMethod
phase_stable(poly, wn)

Phase factor for the piecewise Legendre to Matsubara transform.

Compute the following phase factor in a stable way:

exp.(iπ/2 * wn * cumsum(Δx(poly)))
source
SparseIR.piecewiseMethod
piecewise(rule, edges)

Piecewise quadrature with the same quadrature rule, but scaled.

source
SparseIR.rescaleMethod
rescale(basis::FiniteTempBasis, new_β)

Return a basis for different temperature.

Uses the same kernel with the same $ε$, but a different temperature. Note that this implies a different UV cutoff $ωmax$, since $Λ == β * ωmax$ stays constant.

source
SparseIR.segments_xMethod
segments_x(sve_hints::AbstractSVEHints[, T])

Segments for piecewise polynomials on the $x$ axis.

List of segments on the $x$ axis for the associated piecewise polynomial. Should reflect the approximate position of roots of a high-order singular function in $x$.

source
SparseIR.segments_yMethod
segments_y(sve_hints::AbstractSVEHints[, T])

Segments for piecewise polynomials on the $y$ axis.

List of segments on the $y$ axis for the associated piecewise polynomial. Should reflect the approximate position of roots of a high-order singular function in $y$.

source
SparseIR.shift_xmidMethod
shift_xmid(knots, Δx)

Return midpoint relative to the nearest integer plus a shift.

Return the midpoints xmid of the segments, as pair (diff, shift), where shift is in (0, 1, -1) and diff is a float such that xmid == shift + diff to floating point accuracy.

source
SparseIR.significanceFunction
significance(basis::AbstractBasis)

Return vector σ, where 0 ≤ σ[i] ≤ 1 is the significance level of the i-th basis function. If ϵ is the desired accuracy to which to represent a propagator, then any basis function where σ[i] < ϵ can be neglected.

For the IR basis, we simply have that σ[i] = s[i] / first(s).

source
SparseIR.splitMethod
split(poly, x)

Split segment.

Find segment of poly's domain that covers x.

source
SparseIR.statisticsMethod
statistics(basis::AbstractBasis)

Quantum statistic (Statistics instance, Fermionic() or Bosonic()).

source
SparseIR.sve_hintsFunction
sve_hints(kernel, ε)

Provide discretisation hints for the SVE routines.

Advises the SVE routines of discretisation parameters suitable for transforming the (infinite) SVE into a (finite) SVD problem.

See also AbstractSVEHints.

source
SparseIR.to_IRFunction
to_IR(dlr::DiscreteLehmannRepresentation, g_dlr::AbstractArray, dims=1)

From DLR to IR. g_dlr: Expansion coefficients in DLR.

source
SparseIR.truncateMethod
truncate(u, s, v; rtol=0.0, lmax=typemax(Int))

Truncate singular value expansion.

Arguments

- `u`, `s`, `v`: Thin singular value expansion
 - `rtol`: Only singular values satisfying `s[l]/s[1] > rtol` are retained.
- `lmax`: At most the `lmax` most significant singular values are retained.
source
SparseIR.weight_funcFunction
weight_func(kernel, statistics::Statistics)

Return the weight function for the given statistics.

  • Fermion: w(x) == 1
  • Boson: w(y) == 1/tanh(Λ*y/2)
source
SparseIR.xrangeFunction
xrange(kernel)

Return a tuple $(x_\mathrm{min}, x_\mathrm{max})$ delimiting the range of allowed x values.

source
SparseIR.yrangeFunction
yrange(kernel)

Return a tuple $(y_\mathrm{min}, y_\mathrm{max})$ delimiting the range of allowed y values.

source
SparseIR.ΛFunction
Λ(basis::AbstractBasis)
lambda(basis::AbstractBasis)

Basis cutoff parameter, Λ = β * ωmax, or nothing if not present.

source
SparseIR.βMethod
β(basis::AbstractBasis)
beta(basis::AbstractBasis)

Inverse temperature or nothing if unscaled basis.

source
SparseIR.ωmaxFunction
ωmax(basis::AbstractBasis)
wmax(basis::AbstractBasis)

Real frequency cutoff or nothing if unscaled basis.

source
SparseIR._LinAlg.rrqr!Method

Truncated rank-revealing QR decomposition with full column pivoting.

Decomposes a (m, n) matrix A into the product:

A[:,piv] == Q * R

where Q is an (m, k) isometric matrix, R is a (k, n) upper triangular matrix, piv is a permutation vector, and k is chosen such that the relative tolerance tol is met in the equality above.

source
SparseIR._LinAlg.svd2x2Method

Perform the SVD of an arbitrary two-by-two matrix:

  [ a11  a12 ]  =  [  cu  -su ] [ smax     0 ] [  cv   sv ]
  [ a21  a22 ]     [  su   cu ] [    0  smin ] [ -sv   cv ]

Note that smax and smin can be negative.

source
SparseIR._LinAlg.svd2x2Method

Perform the SVD of upper triangular two-by-two matrix:

  [ f    g   ]  =  [  cu  -su ] [ smax     0 ] [  cv   sv ]
  [ 0    h   ]     [  su   cu ] [    0  smin ] [ -sv   cv ]

Note that smax and smin can be negative.

source
SparseIR._LinAlg.tsvd!Method

Truncated singular value decomposition.

Decomposes an (m, n) matrix A into the product:

A == U * (s .* VT)

where U is a (m, k) matrix with orthogonal columns, VT is a (k, n) matrix with orthogonal rows and s are the singular values, a set of k nonnegative numbers in non-ascending order. The SVD is truncated in the sense that singular values below tol are discarded.

source

Public names index

SparseIR.AugmentedBasisType
AugmentedBasis <: AbstractBasis

Augmented basis on the imaginary-time/frequency axis.

Groups a set of additional functions, augmentations, with a given basis. The augmented functions then form the first basis functions, while the rest is provided by the regular basis, i.e.:

u[l](x) == l < naug ? augmentations[l](x) : basis.u[l-naug](x),

where naug = length(augmentations) is the number of added basis functions through augmentation. Similar expressions hold for Matsubara frequencies.

Augmentation is useful in constructing bases for vertex-like quantities such as self-energies [wallerberger2021] and when constructing a two-point kernel that serves as a base for multi-point functions [shinaoka2018].

Warning

Bases augmented with TauConst and TauLinear tend to be poorly conditioned. Care must be taken while fitting and compactness should be enforced if possible to regularize the problem.

While vertex bases, i.e. bases augmented with MatsubaraConst, stay reasonably well-conditioned, it is still good practice to treat the Hartree–Fock term separately rather than including it in the basis, if possible.

See also: MatsubaraConst for vertex basis [wallerberger2021], TauConst, TauLinear for multi-point [shinaoka2018]

source
SparseIR.DiscreteLehmannRepresentationType
DiscreteLehmannRepresentation <: AbstractBasis

Discrete Lehmann representation (DLR) with poles selected according to extrema of IR.

This class implements a variant of the discrete Lehmann representation (DLR) 1. Instead of a truncated singular value expansion of the analytic continuation kernel $K$ like the IR, the discrete Lehmann representation is based on a "sketching" of $K$. The resulting basis is a linear combination of a discrete set of poles on the real-frequency axis, continued to the imaginary-frequency axis:

 G(iv) == sum(a[i] / (iv - w[i]) for i in range(L))

Warning The poles on the real-frequency axis selected for the DLR are based on a rank-revealing decomposition, which offers accuracy guarantees. Here, we instead select the pole locations based on the zeros of the IR basis functions on the real axis, which is a heuristic. We do not expect that difference to matter, but please don't blame the DLR authors if we were wrong :-)

source
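A hedged sketch of an IR ↔ DLR round trip. The constructor call DiscreteLehmannRepresentation(basis) and the coefficient vector gl are assumptions; from_IR and to_IR are documented in the private names index:

dlr = DiscreteLehmannRepresentation(basis)  # assumed constructor from an existing IR basis
g_dlr = from_IR(dlr, gl)     # IR coefficients → DLR (pole) coefficients
gl_rt = to_IR(dlr, g_dlr)    # ... and back to the IR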
SparseIR.FiniteTempBasisType
FiniteTempBasis <: AbstractBasis

Intermediate representation (IR) basis for given temperature.

For a continuation kernel K from real frequencies, ω ∈ [-ωmax, ωmax], to imaginary time, τ ∈ [0, β], this type stores the truncated singular value expansion or IR basis:

K(τ, ω) ≈ sum(u[l](τ) * s[l] * v[l](ω) for l in 1:L)

This basis is inferred from a reduced form by appropriate scaling of the variables.

Fields

  • u::PiecewiseLegendrePolyVector: Set of IR basis functions on the imaginary time (tau) axis. These functions are stored as piecewise Legendre polynomials.

    To obtain the value of all basis functions at a point or an array of points x, you can call the function u(x). To obtain a single basis function, a slice or a subset l, you can use u[l].

  • uhat::PiecewiseLegendreFT: Set of IR basis functions on the Matsubara frequency (wn) axis. These objects are stored as a set of Bessel functions.

    To obtain the value of all basis functions at a Matsubara frequency or an array of points wn, you can call the function uhat(wn). Note that we expect reduced frequencies, which are simply even/odd numbers for bosonic/fermionic objects. To obtain a single basis function, a slice or a subset l, you can use uhat[l].

  • s: Vector of singular values of the continuation kernel

  • v::PiecewiseLegendrePoly: Set of IR basis functions on the real frequency (w) axis. These functions are stored as piecewise Legendre polynomials.

    To obtain the value of all basis functions at a point or an array of points w, you can call the function v(w). To obtain a single basis function, a slice or a subset l, you can use v[l].

source
SparseIR.FiniteTempBasisMethod
FiniteTempBasis{S}(β, ωmax, ε=nothing; max_size=nothing, args...)

Construct a finite temperature basis suitable for the given S (Fermionic or Bosonic) and cutoffs β and ωmax.

source
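A minimal usage sketch following the constructor documented above; the cutoff values are chosen for illustration only:

# Fermionic IR basis with β = 10, ωmax = 8 and accuracy target ε = 1e-6.
basis = FiniteTempBasis{Fermionic}(10.0, 8.0, 1e-6)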
SparseIR.FiniteTempBasisSetType
FiniteTempBasisSet

Type for holding IR bases and sparse-sampling objects.

An object of this type holds IR bases for fermions and bosons and associated sparse-sampling objects.

Fields

  • basis_f::FiniteTempBasis: Fermion basis
  • basis_b::FiniteTempBasis: Boson basis
  • tau::Vector{Float64}: Sampling points in the imaginary-time domain
  • wn_f::Vector{Int}: Sampling fermionic frequencies
  • wn_b::Vector{Int}: Sampling bosonic frequencies
  • smpltauf::TauSampling: Sparse sampling for tau & fermion
  • smpltaub::TauSampling: Sparse sampling for tau & boson
  • smplwnf::MatsubaraSampling: Sparse sampling for Matsubara frequency & fermion
  • smplwnb::MatsubaraSampling: Sparse sampling for Matsubara frequency & boson
  • sve_result::Tuple{PiecewiseLegendrePoly,Vector{Float64},PiecewiseLegendrePoly}: Results of SVE

Getters

  • beta::Float64: Inverse temperature
  • ωmax::Float64: Cut-off frequency
source
SparseIR.LogisticKernelType
LogisticKernel <: AbstractKernel

Fermionic/bosonic analytical continuation kernel.

In dimensionless variables $x = 2 τ/β - 1$, $y = β ω/Λ$, the integral kernel is a function on $[-1, 1] × [-1, 1]$:

\[ K(x, y) = \frac{e^{-Λ y (x + 1) / 2}}{1 + e^{-Λ y}}\]

LogisticKernel is a fermionic analytic continuation kernel. Nevertheless, one can model the $τ$ dependence of a bosonic correlation function as follows:

\[ ∫ \frac{e^{-Λ y (x + 1) / 2}}{1 - e^{-Λ y}} ρ(y) dy = ∫ K(x, y) ρ'(y) dy,\]

with

\[ ρ'(y) = w(y) ρ(y),\]

where the weight function is given by

\[ w(y) = \frac{1}{\tanh(Λ y/2)}.\]

source
SparseIR.MatsubaraFreqType
MatsubaraFreq(n)

Prefactor n of the Matsubara frequency ω = n*π/β

Struct representing the Matsubara frequency ω entering the Fourier transform of a propagator G(τ) on imaginary time τ to its Matsubara equivalent Ĝ(iω) on the imaginary-frequency axis:

        β
 Ĝ(iω) = ∫  dτ exp(iωτ) G(τ)      with    ω = n π/β,
        0

where β is inverse temperature and by convention we include the imaginary unit in the frequency argument, i.e., Ĝ(iω). The frequencies depend on the statistics of the propagator, i.e., we have that:

G(τ - β) = ± G(τ)

where + is for bosons and - is for fermions. The frequencies are restricted accordingly.

  • Bosonic frequency (S == Bosonic): n even (periodic in β)
  • Fermionic frequency (S == Fermionic): n odd (anti-periodic in β)
source
SparseIR.MatsubaraSamplingType
MatsubaraSampling <: AbstractSampling

Sparse sampling in Matsubara frequencies.

Allows the transformation between the IR basis and a set of sampling points in (scaled/unscaled) imaginary frequencies.

source
SparseIR.MatsubaraSamplingMethod
MatsubaraSampling(basis; positive_only=false,
                   sampling_points=default_matsubara_sampling_points(basis; positive_only),
                  factorize=true)

Construct a MatsubaraSampling object. If not given, the sampling_points are chosen as the (discrete) extrema of the highest-order basis function in Matsubara. This turns out to be close to optimal with respect to conditioning for this size (within a few percent).

By setting positive_only=true, one assumes that functions to be fitted are symmetric in Matsubara frequency, i.e.:

\[ Ĝ(iν) = conj(Ĝ(-iν))\]

or equivalently, that they are purely real in imaginary time. In this case, sparse sampling is performed over non-negative frequencies only, cutting away half of the necessary sampling space. factorize controls whether the SVD decomposition is computed.

source
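A minimal sketch, assuming a FiniteTempBasis named basis and an object that is real in imaginary time:

# Sample on non-negative Matsubara frequencies only (see positive_only above).
smpl_iω = MatsubaraSampling(basis; positive_only=true)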
SparseIR.RegularizedBoseKernelType
RegularizedBoseKernel <: AbstractKernel

Regularized bosonic analytical continuation kernel.

In dimensionless variables $x = 2 τ/β - 1$, $y = β ω/Λ$, the regularized bosonic integral kernel is a function on $[-1, 1] × [-1, 1]$:

\[ K(x, y) = y \frac{e^{-Λ y (x + 1) / 2}}{e^{-Λ y} - 1}\]

Care has to be taken in evaluating this expression around $y = 0$.

source
SparseIR.TauConstType
TauConst <: AbstractAugmentation

Constant in imaginary time/discrete delta in frequency.

source
SparseIR.TauLinearType
TauLinear <: AbstractAugmentation

Linear function in imaginary time, antisymmetric around β/2.

source
SparseIR.TauSamplingType
TauSampling <: AbstractSampling

Sparse sampling in imaginary time.

Allows the transformation between the IR basis and a set of sampling points in (scaled/unscaled) imaginary time.

source
SparseIR.TauSamplingMethod
TauSampling(basis; sampling_points=default_tau_sampling_points(basis), factorize=true)

Construct a TauSampling object. If not given, the sampling_points are chosen as the extrema of the highest-order basis function in imaginary time. This turns out to be close to optimal with respect to conditioning for this size (within a few percent). factorize controls whether the SVD decomposition is computed.

source
SparseIR.evaluate!Method
evaluate!(buffer::AbstractArray{T,N}, sampling, al; dim=1) where {T,N}

Like evaluate, but write the result to buffer. Please use dim = 1 or N to avoid allocating large temporary arrays internally.

source
SparseIR.evaluateMethod
evaluate(sampling, al; dim=1)

Evaluate the basis coefficients al at the sparse sampling points.

source
SparseIR.fit!Method
fit!(buffer::Array{S,N}, smpl::AbstractSampling, al::Array{T,N}; 
    dim=1, workarr::Vector{S}) where {S,T,N}

Like fit, but write the result to buffer. Use dim = 1 or dim = N to avoid allocating large temporary arrays internally. The length of workarr cannot be smaller than SparseIR.workarrlength(smpl, al).

source
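
A sketch of a fully in-place fit that preallocates both the coefficient buffer and the work array; the parameters and data below are placeholders.

using SparseIR

β, ωmax, ε = 10.0, 8.0, 1e-6
basis = FiniteTempBasis{Fermionic}(β, ωmax, ε)
sτ = TauSampling(basis)

Gτ = rand(ComplexF64, length(sampling_points(sτ)))               # values on the τ sampling points
Gl = Vector{ComplexF64}(undef, length(basis))                    # preallocated coefficient buffer
work = Vector{ComplexF64}(undef, SparseIR.workarrlength(sτ, Gτ))
fit!(Gl, sτ, Gτ; dim=1, workarr=work)                            # avoids large internal temporaries
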
SparseIR.fitMethod
fit(sampling, al::AbstractArray{T,N}; dim=1)

Fit basis coefficients from the sparse sampling points. Please use dim = 1 or N to avoid allocating large temporary arrays internally.

source
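
evaluate and fit are approximate inverses of each other, also for higher-dimensional arrays when the sampled axis is selected with dim; a sketch with made-up data:

using SparseIR

β, ωmax, ε = 10.0, 8.0, 1e-6
basis = FiniteTempBasis{Fermionic}(β, ωmax, ε)
sτ = TauSampling(basis)

Gl = rand(length(basis), 2, 2)   # IR coefficients with two extra (orbital) axes
Gτ = evaluate(sτ, Gl; dim=1)     # G(τᵢ) along the first axis
Gl ≈ fit(sτ, Gτ; dim=1)          # the least-squares fit recovers the coefficients (up to conditioning)
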
SparseIR.overlapMethod
overlap(poly::PiecewiseLegendrePoly, f; 
    rtol=eps(T), return_error=false, maxevals=10^4, points=T[])

Evaluate overlap integral of poly with arbitrary function f.

Given the function f, evaluate the integral

∫ dx f(x) poly(x)

using adaptive Gauss-Legendre quadrature.

points is a sequence of break points in the integration interval where local difficulties of the integrand may occur (e.g. singularities, discontinuities).

source
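
This is, for instance, how the guide obtains the IR coefficients of the non-interacting propagator from its semielliptic spectral function (parameters as in the guide):

using SparseIR

β, ωmax, ε = 10.0, 8.0, 1e-6
basis = FiniteTempBasis{Fermionic}(β, ωmax, ε)

ρ₀(ω) = 2 / π * √(1 - clamp(ω, -1, +1)^2)
ρ₀l = overlap.(basis.v, ρ₀)   # ρ₀ₗ = ∫ dω Vₗ(ω) ρ₀(ω)
G₀l = -basis.s .* ρ₀l         # IR coefficients of the non-interacting propagator
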
diff --git a/dev/refs.bib b/dev/refs.bib index 0f81636..478de1d 100644 --- a/dev/refs.bib +++ b/dev/refs.bib @@ -1342,11 +1342,11 @@ @article{NunezFernandez2022 publisher = {American Physical Society} } -@unpublished{Michalek2024, +@misc{Michalek2024, author = {Michalek, Matthias}, year = {2024}, title = {Solving the Anderson impurity model with intermediate representation of the parquet equations}, - note = {Master's thesis} + note = {Bachelor's thesis, TU Wien} } @article{Wang2020, @@ -1420,13 +1420,14 @@ @misc{Helton2022 } @article{Shinaoka2022, - author = {Shinaoka, Hiroshi and Chikano, Naoya and Gull, Emanuel and Li, Jia and Nomoto, Takuya and Otsuki, Junya and Wallerberger, Markus and Wang, Tianchun and Yoshimi, Kazuyoshi}, - year = {2022-09}, - journal = {SciPost Physics Lecture Notes}, - title = {Efficient ab initio many-body calculations based on sparse modeling of Matsubara Green’s function}, - doi = {10.21468/scipostphyslectnotes.63}, - issn = {2590-1990}, - publisher = {Stichting SciPost} + title = {{Efficient ab initio many-body calculations based on sparse modeling of Matsubara Green's function}}, + author = {Hiroshi Shinaoka and Naoya Chikano and Emanuel Gull and Jia Li and Takuya Nomoto and Junya Otsuki and Markus Wallerberger and Tianchun Wang and Kazuyoshi Yoshimi}, + journal = {SciPost Phys. Lect. Notes}, + pages = {63}, + year = {2022}, + publisher = {SciPost}, + doi = {10.21468/SciPostPhysLectNotes.63}, + url = {https://scipost.org/10.21468/SciPostPhysLectNotes.63} } @Comment{jabref-meta: databaseType:biblatex;} diff --git a/dev/search_index.js b/dev/search_index.js index ca32920..4518177 100644 --- a/dev/search_index.js +++ b/dev/search_index.js @@ -1,3 +1,3 @@ var documenterSearchIndex = {"docs": -[{"location":"guide/#guide","page":"Guide","title":"Introduction","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"We present SparseIR.jl, a Julia library for constructing and working with the intermediate representation of correlation functions [1–4]. The intermediate representation (IR) takes the matrix kernel occurring in transforming propagators between the real-frequency axis and the imaginary-time axis and performs a singular value expansion (SVE) on it, decomposing it into a set of singular values as well as two sets of functions. One of those lives on the real-frequency axis and one on the imaginary-time axis. Expressing a propagator in terms of either basis–by an ordinary least squares fit–then allows us to easily transition between them. In combination with a prescription for constructing sparse sets of sampling points on each axis, we have a method for optimally compressing propagators.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"SparseIR.jl implements the intermediate representation, providing on-the-fly computation of basis functions and singular values accurate to full precision along with routines for sparse sampling. It is further fully unit tested, featuring near-complete code coverage. Here, we will explain its inner workings by means of an example use case. 
In preparing this document, SparseIR.jl version 1.0.18 and Julia version 1.11.1 were used.","category":"page"},{"location":"guide/#Problem-statement","page":"Guide","title":"Problem statement","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"We take a problem to be solved from the sparse-ir paper [4].","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Let us perform self-consistent second-order perturbation theory for the single impurity Anderson model at finite temperature. Its Hamiltonian is given by H = U c^dagger_uparrow c^dagger_downarrow c_downarrow c_uparrow + sum_psigma big(V_psigma f_psigma^dagger c_sigma + V_psigma^* c_sigma^dagger f_psigmabig) + sum_psigma epsilon_p f_psigma^dagger f_psigmawhere U is the electron interaction strength, c_sigma annihilates an electron on the impurity, f_psigma annihilates an electron in the bath, dagger denotes the Hermitian conjugate, pinmathbb R is bath momentum, and sigmainuparrow downarrow is spin. The hybridization strength V_psigma and bath energies epsilon_p are chosen such that the non-interacting density of states is semi-elliptic with a half-bandwidth of one, rho_0(omega) = frac2pisqrt1-omega^2, U=12, beta=10, and the system is assumed to be half-filled.","category":"page"},{"location":"guide/#outline","page":"Guide","title":"Outline","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"To provide an overview, we first give the full code used to solve the problem with SparseIR.jl.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"using SparseIR\n\nβ = 10.0; ωmax = 8.0; ε = 1e-6;\n\n# Construct the IR basis and sparse sampling for fermionic propagators\nbasis = FiniteTempBasis{Fermionic}(β, ωmax, ε)\nsτ = TauSampling(basis)\nsiω = MatsubaraSampling(basis; positive_only=true)\n\n# Solve the single impurity Anderson model coupled to a bath with a\n# semicircular density of states with unit half bandwidth.\nU = 1.2\nρ₀(ω) = 2/π * √(1 - clamp(ω, -1, +1)^2)\n\n# Compute the IR basis coefficients for the non-interacting propagator\nρ₀l = overlap.(basis.v, ρ₀)\nG₀l = -basis.s .* ρ₀l\n\n# Self-consistency loop: alternate between second-order expression for the\n# self-energy and the Dyson equation until convergence.\nGl = copy(G₀l)\nΣl = zero(Gl)\nGl_prev = zero(Gl)\nG₀iω = evaluate(siω, G₀l)\nwhile !isapprox(Gl, Gl_prev, rtol=ε)\n Gl_prev = copy(Gl)\n Gτ = evaluate(sτ, Gl)\n Στ = @. U^2 * Gτ^3\n Σl = fit(sτ, Στ)\n Σiω = evaluate(siω, Σl)\n Giω = @. (G₀iω^-1 - Σiω)^-1\n Gl = fit(siω, Giω)\nend","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Note that this script as presented is optimized for readability instead of performance; in practice, you would want to make minor adjustments to ensure maximum type inferrability and full type stability, among other things putting the code in a function instead of executing in global scope. Such an performance-optimized version is provided in Appendix: Optimized script. 
The following is a detailed explanation of what happens here under the hood and why.","category":"page"},{"location":"guide/#Treatment","page":"Guide","title":"Treatment","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"If we take the second-order expression for the self-energy, which at half filling is simply ","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" Sigma(tau) = U^2 pqtyG(tau)^3","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"and the Dyson equation","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" hat G(mathrmiomega) = pqtypqtyhat G_0(mathrmiomega)^-1 - hatSigma(mathrmiomega)^-1","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"we have a system of two coupled equations. The first one is diagonal in tau and the second is diagonal in mathrmiomega, so we need a way of converting efficiently between these two axes.","category":"page"},{"location":"guide/#Basis-construction","page":"Guide","title":"Basis construction","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"We first import SparseIR and construct an appropriate basis. To do so, we must first choose an appropriate UV frequency cutoff omega_mathrmmax, representing the maximum bandwidth our basis can capture. The non-interacting density of states in our problem is semi-elliptic with half-bandwidth 1. Once we introduce interactions via the interaction strength U, this band splits into the lower and the upper Hubbard bands, centered around omega = pm U2 respectively. So the bandwidth should be around 32 at a minimum, but we choose more than twice that with omega_mathrmmax = 8 to be safe.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"julia> using SparseIR\n\njulia> β = 10.0; ωmax = 8.0; ε = 1e-6;\n\njulia> basis = FiniteTempBasis{Fermionic}(β, ωmax, ε)\n20-element FiniteTempBasis{Fermionic} with β = 10.0, ωmax = 8.0 and singular values:\n 1.4409730317545617\n 1.2153954454510802\n 0.7652662478347486\n 0.49740673945822533\n 0.288562095623106\n 0.1639819552743817\n 0.08901271087151318\n 0.046837974354297436\n 0.023857653233506308\n 0.01179373309602762\n 0.005662400021411787\n 0.0026427291749051072\n 0.0011996720525663963\n 0.0005299554043095754\n 0.00022790287514550545\n 9.544046906619884e-5\n 3.8931895383167936e-5\n 1.5472919567017398e-5\n 5.992753725069063e-6\n 2.2623276239584257e-6","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"There is quite a lot happening behind the scenes in this first innocuous-looking statement, so we will break it down:","category":"page"},{"location":"guide/#Kernel","page":"Guide","title":"Kernel","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"Consider a propagator/Green's function defined on the imaginary-time axis","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" G(tau) equiv -evT_tau A(tau) B(0)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"and the associated spectral function in real frequency rho(omega) = -(1pi) mathrmImG(omega). 
These are related via","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" G(tau) = -int_-omega_mathrmmax^+omega_mathrmmax ddomega tilde K(tau omega) rho(omega)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"with the integral kernel","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" tilde K(tau omega) = frace^-tauomegae^-betaomega + 1","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"mediating between them. If we perform an SVE on this kernel, yielding the decomposition","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" tilde K(tau omega) = sum_ell=1^infty U_ell(tau) S_ell V_ell(omega)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"with the U_ells and V_ells each forming an orthonormal system, we can write","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" G(tau) = sum_ell=1^infty U_ell(tau) G_ell = sum_ell=1^L U_ell(tau) G_ell + epsilon_L+1(tau)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"with expansion coefficients given by","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" G_ell = -int_-omega_mathrmmax^+omega_mathrmmax ddomega S_ell V_ell(omega) rho(omega)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"The singular values decay at least exponentially quickly log S_ell = order-ell log(betaomega_mathrmmax), so the error epsilon_L+1(tau) we incur by representing the Green's function in this way and cutting off the sum after L terms does too. If we know its expansion coefficients, we can easily compute the propagator's Fourier transform by ","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" hat G(mathrmiomega) = int_0^beta ddtau e^mathrmiomegatau G(tau) approx sum_ell=1^L hat U_ell(mathrmiomega) G_ell","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"where mathrmiomega = (2n+1)mathrmipibeta with n in mathbb Z is a Matsubara frequency. The representation in terms of these expansion coefficients is what is called the intermediate representation and what SparseIR.jl is concerned with.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"To standardize our variables, we define x in -1+1 and y in -1+1 by","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" tau = beta (x+1)2 qand omega = omega_mathrmmax y","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"so that the kernel can be written","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" K(x y) = frace^-Lambda y (x + 1) 2e^-Lambda y + 1","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"with Lambda = betaomega_mathrmmax = 80. This is represented by the object LogisticKernel(80.0), which FiniteTempBasis uses internally. 
(Image: Logistic kernel used to construct the basis in our problem treatment 𝐾(𝑥,𝑦).)","category":"page"},{"location":"guide/#Singular-value-expansion","page":"Guide","title":"Singular value expansion","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"Central is the singular value expansion's [5] computation, which is handled by the function SVEResult: Its purpose is to construct the decomposition","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" K(x y) approx sum_ell = 0^L U_ell(x) S_ell V_ell(y)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"where U_ell(x) and V_ell(y) are called K's left and right singular functions respectively and S_ell are its singular values. By construction, the singular functions form an orthonormal basis, i.e.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" int ddx U_ell(x) U_ell(x) = delta_ellell = int ddy V_ell(y) V_ell(y)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"and thus above equation is equivalent to a pair of eigenvalue equations","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"left\nbeginaligned\n S_ell U_ell(x) = int ddy K(x y) V_ell(y) \n S_ell V_ell(y) = int ddx K(x y) U_ell(x)\nendaligned\nright","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Here and in what follows, unless otherwise indicated, integrals are taken to be over the interval -1+1 (because we rescaled to x and y variables).","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"The function first calls the choose_accuracy helper and thereby sets the appropriate working precision. Because we did not specify a working accuracy varepsilon^2, it chooses for us machine precision eps(Float64), i.e. varepsilon approx 22 times 10^-16 and working type Float64x2 - a 128 bits floating point type provided by the MultiFloats.jl package - because in computing the SVD we incur a precision loss of about half our input bits, leaving us with full double accuracy results only if we use quad precision during the computation.\nThen - by calling out to the CentrosymmSVE constructor - a support grid x_i times y_j for the kernel to later be evaluated on is built. Along with these support points weights w_i and z_j are computed. These points and weights consist of repeated scaled Gauss integration rules, such that\n int ddx f(x) approx sum_i f(x_i) w_i\n quadtextandquad\n int ddy g(y) approx sum_j g(y_j) z_j\nTo get an idea regarding the distribution of these sampling points, refer to following figure, which shows x_i times y_j for Lambda = 80: (Image: Sampling point distribution resulting from a Cartesian product of Gauss integration rules.)\nNote:\nThe points do not cover -1 1 times -1 1 but only 0 1 times 0 1. This is actually a special case as we exploit the kernel's centrosymmetry, i.e. K(x y) = K(-x -y). It is straightforward to show that the left/right singular vectors then can be chosen as either odd or even functions.\nConsequentially, we actually sample from a reduced kernel K^mathrmred_pm on 0 1 times 0 1 that is given as either\n K^mathrmred_pm(x y) = K(x y) pm K(x -y)\ngaining a 4-fold speedup (because we take only a quarter of the domain) in constructing the SVE. The full singular functions can be reconstructed by (anti-)symmetrically continuing them to the negative axis. (Image: The reduced kernels. 
Compare their [0,1] × [0,1] subregions with the sampling point distribution plot above.)\nUsing the integration rules allows us to approximate\nleft\nbeginaligned\n S_ell U_ell(x_i) approx sum_j K(x_i y_j) V_ell(y_j) z_j forall i \n S_ell V_ell(y_j) approx sum_i K(x_i y_j) U_ell(x_i) w_i forall j\nendaligned\nright\nwhich we now multiply by sqrtw_i and sqrtz_j respectively to normalize our basis functions, yielding\nleft\nbeginaligned\n S_ell sqrtw_i U_ell(x_i) approx sum_j sqrtw_i K(x_i y_j) sqrtz_j sqrtz_j V_ell(y_j) \n S_ell sqrtz_j V_ell(y_j) approx sum_i sqrtw_i K(x_i y_j) sqrtz_j sqrtw_i U_ell(x_i)\nendaligned\nright\nIf we now define vectors vec u_ell, vec v_ell and a matrix K with entries u_ell i equiv sqrtw_i U_ell(x_i), v_ell j equiv sqrtz_j V_ell(y_j) and K_ij equiv sqrtw_i K(x_i y_j) sqrtz_j, then\nleft\nbeginaligned\n S_ell u_ell i approx sum_j K_ij v_ell j \n S_ell v_ell j approx sum_i K_ij u_ell i\nendaligned\nright\nor\nleft\nbeginaligned\n S_ell vec u_ell approx K^phantommathrmT vec v_ell \n S_ell vec v_ell approx K^mathrmT vec u_ell\nendaligned\nright\nTogether with the property vec u_ell^mathrmT vec u_ell approx delta_ellell approx vec v_ell^mathrmT vec v_ell we have successfully translated the original SVE problem into an SVD, because\n K = sum_ell S_ell vec u_ell vec v_ell^mathrmT\nThe next step is calling the matrices function which computes the matrix K derived in the previous step.\nnote: Note\nThe function is named in the plural because in the centrosymmetric case it actually returns two matrices K_+ and K_-, one for the even and one for the odd kernel. The SVDs of these matrices are later concatenated, so for simplicity, we will refer to K from here on out.\ninfo: Info\nSpecial care is taken here to avoid FP-arithmetic cancellation around x = -1 and x = +1.\n(Image: Kernel matrices, rotated 90 degrees counterclockwise to make the connection with the (subregion [0,1] × [0,1] of the) previous figure more obvious. Thus we can see how the choice of sampling points has magnified and brought to the matrices' centers the regions of interest. Furthermore, elements with absolute values smaller than 10\\% of the maximum have been omitted to emphasize the structure; this should however not be taken to mean that there is any sparsity to speak of we could exploit in the next step.)\nTake the truncated singular value decomposition (trSVD) of K, or rather, of K_+ and K_-. We use here a custom trSVD routine written by Markus Wallerberger which combines a homemade rank-revealing QR decomposition with GenericLinearAlgebra.svd!. This is necessary because there is currently no trSVD for quad precision types available.\nVia the function truncate, we throw away superfluous terms in our expansion. More specifically, we choose the basis size L such that S_ell S_0 varepsilon for all ell leq L. Here varepsilon is our selected precision, in our case it's equal to the double precision machine epsilon, 2^-52 approx 222 times 10^-16.\nFinally, we need a postprocessing step implemented in postprocess which performs some technical manipulation to turn the SVD result into the SVE we actually want. The functions are represented as piecewise Legendre polynomials, which model a function on the interval x_mathrmmin x_mathrmmax as a set of segments on the intervals a_i a_i+1, where on each interval the function is expanded in scaled Legendre polynomials. 
The interval endpoints are chosen such that they reflect the approximate position of roots of a high-order singular function in x.","category":"page"},{"location":"guide/#The-finishing-touches","page":"Guide","title":"The finishing touches","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"The difficult part of constructing the FiniteTempBasis is now over. Next we truncate the left and right singular functions by discarding U_ell and V_ell with indices ell L to match the S_ell. The functions are now scaled to imaginary-time and frequency according to","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" tau = beta2 (x + 1) qand omega = omega_mathrmmax y","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"and to match them, the singular values are multiplied by sqrt(beta2)omega, because K(xy) sqrtdd xdd y = K(tauomega) sqrtddtauddomega. We also add to our basis hatU_ell(mathrmiomega), the Fourier transforms of the left singular functions, defined on the fermionic Matsubara frequencies mathrmiomega = mathrmi(2n+1)betapi (with integer n). This is particularly simple, because the Legendre polynomials' Fourier transforms are known analytically and given by spherical Bessel functions, for which we can rely on Bessels.jl [6].","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"We can now take a look at our basis functions to get a feel for them:","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"(Image: The first 6 left singular basis functions on the imaginary-time axis.)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"(Image: The first 6 right singular basis functions on the frequency axis.)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Looking back at the image of the kernel K(xy) we can imagine how it is reconstructed by multiplying and summing (including a factor S_ell) U_ell(tau) and V_ell(omega). An important property of the left singular functions is interlacing, i.e. U_ell interlaces U_ell+1. A function g with roots alpha_n-1 leq ldots leq alpha_1 interlaces a function f with roots beta_n leq ldots leq beta_1 if","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" beta_n leq alpha_n-1 leq beta_n-1 leq ldots leq beta_1","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"We will use this property in constructing our sparse sampling set.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"(Image: The first 8 Fourier transformed basis functions on the Matsubara frequency axis.)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"As for the Matsubara basis functions, we plot only the non-zero components, i.e. 
mathrmImhat U_ell(mathrmiomega) with odd ell and mathrmRehat U_ell(mathrmiomega) with even ell.","category":"page"},{"location":"guide/#Constructing-the-samplers","page":"Guide","title":"Constructing the samplers","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"With our basis complete, we construct sparse sampling objects for fermionic propagators on the imaginary-time axis and on the Matsubara frequency axis.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"julia> sτ = TauSampling(basis);\n\njulia> show(sampling_points(sτ))\n[0.018885255323127792, 0.10059312563754808, 0.25218900406693556, 0.4822117319309194, 0.8042299148252774, 1.2376463941125326, 1.8067997157763205, 2.535059399842931, 3.4296355795122793, 4.45886851573216, 5.541131484267839, 6.570364420487721, 7.464940600157068, 8.19320028422368, 8.762353605887466, 9.195770085174722, 9.51778826806908, 9.747810995933065, 9.899406874362452, 9.981114744676873]\n\njulia> siω = MatsubaraSampling(basis; positive_only=true);\n\njulia> show(sampling_points(siω))\nFermionicFreq[FermionicFreq(1), FermionicFreq(3), FermionicFreq(5), FermionicFreq(7), FermionicFreq(9), FermionicFreq(11), FermionicFreq(17), FermionicFreq(27), FermionicFreq(49), FermionicFreq(153)]","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Both functions first determine a suitable set of sampling points on their respective axis. In the case of TauSampling, the sampling points tau_i are chosen as the extrema of the highest-order basis function in imaginary-time; this works because U_ell has exactly ell roots. This turns out to be close to optimal with respect to conditioning for this size (within a few percent). Similarly, MatsubaraSampling chooses sampling points mathrmiomega_n as the (discrete) extrema of the highest-order basis function in Matsubara. By setting positive_only=true, one assumes that functions to be fitted are symmetric in Matsubara frequency, i.e.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" hat G(mathrmiomega) = qty(hat G(-mathrmiomega))^*","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"In this case, sparse sampling is performed over non-negative frequencies only, cutting away half of the necessary sampling space, so we get only 10 sampling points instead of the 20 in the imaginary-time case.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Then, both compute design matrices by E^tau_iell = u_ell(tau_i) and E^omega_nell = hatu_ell(iomega_n) as well as their SVDs. We are now able to get the IR basis coefficients of a function that is known on the imaginary-time sampling points by solving the fitting problem","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" G_ell = mathrmargmin_G_ell sum_tau_i normG(tau_i) - sum_ell E^tau_iell G_ell^2","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"which can be done efficiently once the SVD is known. The same can be done on the Matsubara axis","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" G_ell = mathrmargmin_G_ell sum_mathrmiomega_n normhatG(mathrmiomega_n) - sum_ell E^omega_nell G_ell^2","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"and taken together we now have a way of moving efficiently between both. 
In solving these problems, we need to take their conditioning into consideration; in the case of the Matsubara axis, the problem is somewhat worse conditioned than on the imaginary-time axis due to its discrete nature. We augment it therefore with 4 additional sampling frequencies.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"(Image: Scaling behavior of the fitting problems' conditioning.)","category":"page"},{"location":"guide/#Initializing-the-iteration","page":"Guide","title":"Initializing the iteration","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"Because the non-interacting density of states is given rho_0(omega) = frac2pisqrt1 - omega^2, we can easily get the IR basis coefficients for the non-interacting propagator","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" G_0_ell = -S_ell rho_0_ell = -S_ell int ddomega V_ell(omega) rho_0(omega)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"by utilizing the overlap function, which implements integration.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"julia> U = 1.2\n1.2\n\njulia> ρ₀(ω) = 2/π * √(1 - clamp(ω, -1, +1)^2)\nρ₀ (generic function with 1 method)\n\njulia> ρ₀l = overlap.(basis.v, ρ₀)\n20-element Vector{Float64}:\n 0.601244316541724\n 1.3444106938820255e-17\n -0.3114509472896204\n ⋮\n -4.553649124439119e-18\n -0.04700635138837371\n 1.734723475976807e-18\n\njulia> G₀l = -basis.s .* ρ₀l\n20-element Vector{Float64}:\n -0.8663768456323275\n -1.6339906341599403e-17\n 0.23834289781690587\n ⋮\n 7.045824663886568e-23\n 2.816974873845819e-7\n -3.924512839631511e-24","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"The coefficients of the full Green's function are then initialized with those of the non-interacting one. 
Also, we will need the non-interacting propagator in Matsubara for the Dyson equation, so we evaluate with the MatsubaraSampling object created before.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"julia> Gl = copy(G₀l)\n20-element Vector{Float64}:\n -0.8663768456323275\n -1.6339906341599403e-17\n ⋮\n 2.816974873845819e-7\n -3.924512839631511e-24\n\njulia> Σl = zero(Gl)\n20-element Vector{ComplexF64}:\n 0.0 + 0.0im\n 0.0 + 0.0im\n ⋮\n 0.0 + 0.0im\n 0.0 + 0.0im\n\njulia> Gl_prev = zero(Gl)\n20-element Vector{Float64}:\n 0.0\n 0.0\n ⋮\n 0.0\n 0.0\n\njulia> G₀iω = evaluate(siω, G₀l)\n10-element Vector{ComplexF64}:\n 1.0546844383198476e-16 - 1.468055523701327im\n 1.6747120525708993e-16 - 0.8633270688082162im\n ⋮\n 1.627612150170272e-17 - 0.06489281188294724im\n 6.134766817544449e-19 - 0.020802317001514643im","category":"page"},{"location":"guide/#Self-consistency-loop","page":"Guide","title":"Self-consistency loop","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"We are now ready to tackle the coupled equations from the start, and will state them here for the reader's convenience:","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" Sigma(tau) = U^2 pqtyG(tau)^3","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"and the Dyson equation","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" hat G(mathrmiomega) = pqtypqtyhat G_0(mathrmiomega)^-1 - hatSigma(mathrmiomega)^-1","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"The first one is diagonal in tau and the second is diagonal in mathrmiomega, so we employ the IR basis to efficiently convert between the two bases. Starting with our approximation to G_ell we evaluate in the tau-basis to get G(tau), from which we can compute the self-energy on the sampling points Sigma(tau) according to the first equation. This can now be fitted to the tau-basis to get Sigma_ell, and from there hatSigma(mathrmiomega) via evaluation in the mathrmiomega-basis. Now the Dyson equation is used to get hat G(mathrmiomega) on the sampling frequencies, which is then fitted to the mathrmiomega-basis yielding G_ell and completing the loop. This is now performed until convergence.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"julia> while !isapprox(Gl, Gl_prev, rtol=ε)\n Gl_prev = copy(Gl)\n Gτ = evaluate(sτ, Gl)\n Στ = @. U^2 * Gτ^3\n Σl = fit(sτ, Στ)\n Σiω = evaluate(siω, Σl)\n Giω = @. 
(G₀iω^-1 - Σiω)^-1\n Gl = fit(siω, Giω)\n end","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"This is what one iteration looks like spelled out in equations:","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"beginaligned\n G^mathrmprev_ell = G_ell \n G(tau_i) = sum_ell U_ell(tau_i) G_ell \n Sigma(tau_i) = U^2 pqtyG(tau_i)^3 \n Sigma_ell = mathrmargmin_Sigma_ell sum_tau_i normSigma(tau_i) - sum_ell U_ell(tau_i) Sigma_ell^2 \n hatSigma(mathrmiomega_n) = sum_ell hat U_ell(mathrmiomega_n) Sigma_ell \n hat G(mathrmiomega_n) = pqtypqtyhat G_0(mathrmiomega_n)^-1 - hatSigma(mathrmiomega_n)^-1 \n G_ell = mathrmargmin_G_ell sum_mathrmiomega_n normhat G(mathrmiomega_n) - sum_ell hat U_ell(mathrmiomega_n) G_ell^2\nendaligned","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"We consider the iteration converged when the difference between subsequent iterations does not exceed the basis accuracy, i.e. when","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" normG_ell - G^mathrmprev_ell leq varepsilon maxBqtynormG_ell normG^mathrmprev_ell","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"where the norm is normG_ell^2 = sum_ell=1^L G_ell^2.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"The entire script, as presented in Appendix: Optimized script, takes around 60ms to run and allocates roughly 19MiB on a laptop CPU from 2019.","category":"page"},{"location":"guide/#Visualizing-the-solution","page":"Guide","title":"Visualizing the solution","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"To plot our solution for the self-energy, we create a MatsubaraSampling object on a dense box of sampling frequencies. In this case, we only need it for expanding, i.e. multiplying a vector, hence there is no need for constructing the SVD, so we pass factorize=false.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"julia> box = FermionicFreq.(1:2:79)\n40-element Vector{FermionicFreq}:\n π/β\n 3π/β\n ⋮\n 77π/β\n 79π/β\n\njulia> siω_box = MatsubaraSampling(basis; sampling_points=box, factorize=false);\n\njulia> Σiω_box = evaluate(siω_box, Σl)\n40-element Vector{ComplexF64}:\n -6.067770915322836e-17 - 0.09325923974719101im\n 2.0279596075077236e-17 - 0.1225916020773678im\n ⋮\n -6.624594477591435e-17 - 0.014786512975659354im\n -7.08391512971528e-17 - 0.01441676347590391im","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"We are now in a position to visualize the results of our calculation:","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"In the main plot, the imaginary part of the self-energy in Matsubara alongside the sampling points on which it was computed. This illustrates very nicely one of the main advantages of our method: During the entire course of the iteration we only ever need to store and calculate with the values of all functions on the sparse set of sampling points and are still able to expand the result the a dense frequency set in the end.\nIn the inset, the IR basis coefficients of the self-energy and of the propagator, along with the basis singular values. We only plot the non-vanishing basis coefficients, which are those at odd values of ell because the real parts of hat G(mathrmiomega) and hat Sigma(mathrmiomega) are almost zero. 
The singular values S_ellS_1 are the bound for absG_l G_1 and absSigma_ell Sigma_1.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"(Image: Self-energy calculated in the self-consistency iteration. The inset shows the IR basis coefficients corresponding to the self-energy and the propagator.)","category":"page"},{"location":"guide/#Summary-and-outlook","page":"Guide","title":"Summary and outlook","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"We introduced SparseIR.jl, a full featured implementation of the intermediate representation in the Julia programming language. By means of a worked example, we explained in detail how to use it and the way it works internally. In this example, we solved an Anderson impurity model with elliptical density of states to second order via a self-consistent loop. We successfully obtained the self-energy (accurate to second order) with minimal computational effort.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Regarding further work, perhaps the single most obvious direction is the extension to multi-particle quantities; And indeed, [7, 8] did exactly this, with Markus Wallerberger writing the as of yet unpublished Julia library OvercompleteIR.jl which builds on top of SparseIR.jl. This library has already found applications in solving the parquet equations for the Hubbard model and for the Anderson impurity model [9].","category":"page"},{"location":"guide/#References","page":"Guide","title":"References","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"H. Shinaoka, J. Otsuki, M. Ohzeki and K. Yoshimi. Compressing Green's function using intermediate representation between imaginary-time and real-frequency domains. Physical Review B 96, 35147 (2017).\n\n\n\nJ. Li, M. Wallerberger, N. Chikano, C.-N. Yeh, E. Gull and H. Shinaoka. Sparse sampling approach to efficient ab initio calculations at finite temperature. Physical Review B 101, 035144 (2020).\n\n\n\nH. Shinaoka, N. Chikano, E. Gull, J. Li, T. Nomoto, J. Otsuki, M. Wallerberger, T. Wang and K. Yoshimi. Efficient ab initio many-body calculations based on sparse modeling of Matsubara Green’s function. SciPost Physics Lecture Notes (2022-09).\n\n\n\nM. Wallerberger, S. Badr, S. Hoshino, S. Huber, F. Kakizawa, T. Koretsune, Y. Nagai, K. Nogaki, T. Nomoto, H. Mori, J. Otsuki, S. Ozaki, T. Plaikner, R. Sakurai, C. Vogel, N. Witt, K. Yoshimi and H. Shinaoka, sparse-ir: Optimal compression and sparse sampling of many-body propagators. SoftwareX 21, 101266 (2023-02).\n\n\n\nP. C. Hansen. Discrete Inverse Problems: Insights and Algorithms (SIAM, 2010).\n\n\n\nM. Helton and O. Smith. Bessels.jl (2022).\n\n\n\nH. Shinaoka, J. Otsuki, K. Haule, M. Wallerberger, E. Gull, K. Yoshimi and M. Ohzeki. Overcomplete compact representation of two-particle Green's functions. Physical Review B 97, 205111 (2018-05).\n\n\n\nM. Wallerberger, H. Shinaoka and A. Kauch. Solving the Bethe-Salpeter equation with exponential convergence. Physical Review Research 3, 033168 (2021-08).\n\n\n\nM. Michalek. Solving the Anderson impurity model with intermediate representation of the parquet equations. 
Master's thesis.\n\n\n\n","category":"page"},{"location":"guide/#optimized-script","page":"Guide","title":"Appendix: Optimized script","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"With minimal modifications we can transform our code to be more optimized for performance:","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Put script in a function. This is because globals are type instable in Julia.\nAdd ::Vector{Float64} annotation to ensure type inferrability of ρ₀l.\nGl in the loop will be a Vector{ComplexF64} in the loop, so make it complex right away for type stability.\nPreallocate and reuse arrays to remove allocations in the loop, minimizing total allocations and time spent garbage collecting. Here we benefit from SparseIR.jl providing in-place variants fit! and evaluate!.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"using SparseIR\n\nfunction main(; β=10.0, ωmax=8.0, ε=1e-6)\n # Construct the IR basis and sparse sampling for fermionic propagators\n basis = FiniteTempBasis{Fermionic}(β, ωmax, ε)\n sτ = TauSampling(basis)\n siω = MatsubaraSampling(basis; positive_only=true)\n\n # Solve the single impurity Anderson model coupled to a bath with a\n # semicircular density of states with unit half bandwidth.\n U = 1.2\n ρ₀(ω) = 2 / π * √(1 - clamp(ω, -1, +1)^2)\n\n # Compute the IR basis coefficients for the non-interacting propagator\n ρ₀l = overlap.(basis.v, ρ₀)::Vector{Float64}\n G₀l = -basis.s .* ρ₀l\n\n # Self-consistency loop: alternate between second-order expression for the\n # self-energy and the Dyson equation until convergence.\n Gl = complex(G₀l)\n G₀iω = evaluate(siω, G₀l)\n\n # Preallocate arrays for the self-energy and the Green's function\n Σl = similar(Gl)\n Στ = similar(Gl, ComplexF64, length(sampling_points(sτ)))\n Σiω = similar(G₀iω)\n Gτ = similar(Στ)\n Giω = similar(G₀iω)\n\n Gl_prev = zero(Gl)\n while !isapprox(Gl, Gl_prev, rtol=ε)\n Gl_prev .= Gl\n evaluate!(Gτ, sτ, Gl)\n @. Στ = U^2 * Gτ^3\n fit!(Σl, sτ, Στ)\n evaluate!(Σiω, siω, Σl)\n @. 
Giω = (G₀iω^-1 - Σiω)^-1\n fit!(Gl, siω, Giω)\n end\n return basis, Σl\nend","category":"page"},{"location":"private/","page":"Private","title":"Private","text":"CurrentModule = SparseIR","category":"page"},{"location":"private/#Private-names-index","page":"Private","title":"Private names index","text":"","category":"section"},{"location":"private/","page":"Private","title":"Private","text":"These are not considered API and therefore not covered by any semver promises.","category":"page"},{"location":"private/","page":"Private","title":"Private","text":"Modules = [SparseIR]\nPrivate = true\nPublic = false","category":"page"},{"location":"private/#Core.Int-Tuple{MatsubaraFreq}","page":"Private","title":"Core.Int","text":"Get prefactor n for the Matsubara frequency ω = n*π/β\n\n\n\n\n\n","category":"method"},{"location":"private/#Core.Integer-Tuple{MatsubaraFreq}","page":"Private","title":"Core.Integer","text":"Get prefactor n for the Matsubara frequency ω = n*π/β\n\n\n\n\n\n","category":"method"},{"location":"private/#Core.Union-Union{Tuple{MatsubaraFreq{S}}, Tuple{S}} where S","page":"Private","title":"Core.Union","text":"(polyFT::PiecewiseLegendreFT)(ω)\n\nObtain Fourier transform of polynomial for given MatsubaraFreq ω.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.AbstractAugmentation","page":"Private","title":"SparseIR.AbstractAugmentation","text":"AbstractAugmentation\n\nScalar function in imaginary time/frequency.\n\nThis represents a single function in imaginary time and frequency, together with some auxiliary methods that make it suitable for augmenting a basis.\n\nSee also: AugmentedBasis\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.AbstractBasis","page":"Private","title":"SparseIR.AbstractBasis","text":"AbstractBasis\n\nAbstract base class for bases on the imaginary-time axis.\n\nLet basis be an abstract basis. Then we can expand a two-point propagator G(τ), where τ is imaginary time, into a set of basis functions:\n\nG(τ) == sum(basis.u[l](τ) * g[l] for l in 1:length(basis)) + ϵ(τ),\n\nwhere basis.u[l] is the l-th basis function, g[l] is the associated expansion coefficient and ϵ(τ) is an error term. Similarly, the Fourier transform Ĝ(n), where n is now a Matsubara frequency, can be expanded as follows:\n\nĜ(n) == sum(basis.uhat[l](n) * g[l] for l in 1:length(basis)) + ϵ(n),\n\nwhere basis.uhat[l] is now the Fourier transform of the basis function.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.AbstractKernel","page":"Private","title":"SparseIR.AbstractKernel","text":"(kernel::AbstractKernel)(x, y[, x₊, x₋])\n\nEvaluate kernel at point (x, y).\n\nThe parameters x₊ and x₋, if given, shall contain the values of x - xₘᵢₙ and xₘₐₓ - x, respectively. This is useful if either difference is to be formed and cancellation expected.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.AbstractKernel-2","page":"Private","title":"SparseIR.AbstractKernel","text":"AbstractKernel\n\nIntegral kernel K(x, y).\n\nAbstract base type for an integral kernel, i.e. a AbstractFloat binary function K(x y) used in a Fredhold integral equation of the first kind:\n\n u(x) = K(x y) v(y) dy\n\nwhere x x_mathrmmin x_mathrmmax and y y_mathrmmin y_mathrmmax. 
For its SVE to exist, the kernel must be square-integrable, for its singular values to decay exponentially, it must be smooth.\n\nIn general, the kernel is applied to a scaled spectral function ρ(y) as:\n\n K(x y) ρ(y) dy\n\nwhere ρ(y) = w(y) ρ(y).\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.AbstractSVEHints","page":"Private","title":"SparseIR.AbstractSVEHints","text":"AbstractSVEHints\n\nDiscretization hints for singular value expansion of a given kernel.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.AbstractSampling","page":"Private","title":"SparseIR.AbstractSampling","text":"AbstractSampling\n\nAbstract type for sparse sampling.\n\nEncodes the \"basis transformation\" of a propagator from the truncated IR basis coefficients G_ir[l] to time/frequency sampled on sparse points G(x[i]) together with its inverse, a least squares fit:\n\n ________________ ___________________\n | | evaluate | |\n | Basis |---------------->| Value on |\n | coefficients |<----------------| sampling points |\n |________________| fit |___________________|\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.CentrosymmSVE","page":"Private","title":"SparseIR.CentrosymmSVE","text":"CentrosymmSVE <: AbstractSVE\n\nSVE of centrosymmetric kernel in block-diagonal (even/odd) basis.\n\nFor a centrosymmetric kernel K, i.e., a kernel satisfying: K(x, y) == K(-x, -y), one can make the following ansatz for the singular functions:\n\nu[l](x) = ured[l](x) + sign[l] * ured[l](-x)\nv[l](y) = vred[l](y) + sign[l] * ured[l](-y)\n\nwhere sign[l] is either +1 or -1. This means that the singular value expansion can be block-diagonalized into an even and an odd part by (anti-)symmetrizing the kernel:\n\nK_even = K(x, y) + K(x, -y)\nK_odd = K(x, y) - K(x, -y)\n\nThe lth basis function, restricted to the positive interval, is then the singular function of one of these kernels. If the kernel generates a Chebyshev system [1], then even and odd basis functions alternate.\n\n[1]: A. 
Karlin, Total Positivity (1968).\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.LogisticKernelOdd","page":"Private","title":"SparseIR.LogisticKernelOdd","text":"LogisticKernelOdd <: AbstractReducedKernel\n\nFermionic analytical continuation kernel, odd.\n\nIn dimensionless variables x = 2τβ - 1, y = βωΛ, the fermionic integral kernel is a function on -1 1 -1 1:\n\n K(x y) = -fracsinh(Λ x y 2)cosh(Λ y 2)\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.PiecewiseLegendreFT","page":"Private","title":"SparseIR.PiecewiseLegendreFT","text":"PiecewiseLegendreFT <: Function\n\nFourier transform of a piecewise Legendre polynomial.\n\nFor a given frequency index n, the Fourier transform of the Legendre function is defined as:\n\n p̂(n) == ∫ dx exp(im * π * n * x / (xmax - xmin)) p(x)\n\nThe polynomial is continued either periodically (freq=:even), in which case n must be even, or antiperiodically (freq=:odd), in which case n must be odd.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.PiecewiseLegendrePoly","page":"Private","title":"SparseIR.PiecewiseLegendrePoly","text":"PiecewiseLegendrePoly <: Function\n\nPiecewise Legendre polynomial.\n\nModels a function on the interval xmin xmax as a set of segments on the intervals Si = ai ai+1, where on each interval the function is expanded in scaled Legendre polynomials.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.PiecewiseLegendrePolyVector","page":"Private","title":"SparseIR.PiecewiseLegendrePolyVector","text":"PiecewiseLegendrePolyVector\n\nContains a Vector{PiecewiseLegendrePoly}.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.PowerModel","page":"Private","title":"SparseIR.PowerModel","text":"PowerModel\n\nModel from a high-frequency series expansion::\n\nA(iω) == sum(A[n] / (iω)^(n+1) for n in 1:N)\n\nwhere iω == i * π2 * wn is a reduced imaginary frequency, i.e., wn is an odd/even number for fermionic/bosonic frequencies.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.ReducedKernel","page":"Private","title":"SparseIR.ReducedKernel","text":"ReducedKernel\n\nRestriction of centrosymmetric kernel to positive interval.\n\nFor a kernel K on -1 1 -1 1 that is centrosymmetric, i.e. K(x y) = K(-x -y), it is straight-forward to show that the left/right singular vectors can be chosen as either odd or even functions.\n\nConsequentially, they are singular functions of a reduced kernel K_mathrmred on 0 1 0 1 that is given as either:\n\n K_mathrmred(x y) = K(x y) pm K(x -y)\n\nThis kernel is what this type represents. 
The full singular functions can be reconstructed by (anti-)symmetrically continuing them to the negative axis.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.RegularizedBoseKernelOdd","page":"Private","title":"SparseIR.RegularizedBoseKernelOdd","text":"RegularizedBoseKernelOdd <: AbstractReducedKernel\n\nBosonic analytical continuation kernel, odd.\n\nIn dimensionless variables x = 2 τ β - 1, y = β ω Λ, the fermionic integral kernel is a function on -1 1 -1 1:\n\n K(x y) = -y fracsinh(Λ x y 2)sinh(Λ y 2)\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.Rule","page":"Private","title":"SparseIR.Rule","text":"Rule{T<:AbstractFloat}\n\nQuadrature rule.\n\nApproximation of an integral over [a, b] by a sum over discrete points x with weights w:\n\n f(x) ω(x) dx _i f(x_i) w_i\n\nwhere we generally have superexponential convergence for smooth f(x) in the number of quadrature points.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.SVEResult-Tuple{SparseIR.AbstractKernel}","page":"Private","title":"SparseIR.SVEResult","text":"SVEResult(kernel::AbstractKernel;\n Twork=nothing, ε=nothing, lmax=typemax(Int),\n n_gauss=nothing, svd_strat=:auto,\n sve_strat=iscentrosymmetric(kernel) ? CentrosymmSVE : SamplingSVE\n)\n\nPerform truncated singular value expansion of a kernel.\n\nPerform a truncated singular value expansion (SVE) of an integral kernel kernel : [xmin, xmax] x [ymin, ymax] -> ℝ:\n\nkernel(x, y) == sum(s[l] * u[l](x) * v[l](y) for l in (1, 2, 3, ...)),\n\nwhere s[l] are the singular values, which are ordered in non-increasing fashion, u[l](x) are the left singular functions, which form an orthonormal system on [xmin, xmax], and v[l](y) are the right singular functions, which form an orthonormal system on [ymin, ymax].\n\nThe SVE is mapped onto the singular value decomposition (SVD) of a matrix by expanding the kernel in piecewise Legendre polynomials (by default by using a collocation).\n\nArguments\n\nK::AbstractKernel: Integral kernel to take SVE from.\nε::Real: Accuracy target for the basis: attempt to have singular values down to a relative magnitude of ε, and have each singular value and singular vector be accurate to ε. A Twork with a machine epsilon of ε^2 or lower is required to satisfy this. Defaults to 2.2e-16 if xprec is available, and 1.5e-8 otherwise.\ncutoff::Real: Relative cutoff for the singular values. A Twork with machine epsilon of cutoff is required to satisfy this. Defaults to a small multiple of the machine epsilon.\nNote that cutoff and ε serve distinct purposes. cutoff reprsents the accuracy to which the kernel is reproduced, whereas ε is the accuracy to which the singular values and vectors are guaranteed.\nlmax::Integer: Maximum basis size. If given, only at most the lmax most significant singular values and associated singular functions are returned.\n`n_gauss (int): Order of Legendre polynomials. Defaults to kernel hinted value.\nTwork: Working data type. Defaults to a data type with machine epsilon of at mostε^2and at mostcutoff`, or otherwise most accurate data type available.\nsve_strat::AbstractSVE: SVE to SVD translation strategy. Defaults to SamplingSVE, optionally wrapped inside of a CentrosymmSVE if the kernel is centrosymmetric.\nsvd_strat ('fast' or 'default' or 'accurate'): SVD solver. 
Defaults to fast (ID/RRQR) based solution when accuracy goals are moderate, and more accurate Jacobi-based algorithm otherwise.\n\nReturns: An SVEResult containing the truncated singular value expansion.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.SamplingSVE","page":"Private","title":"SparseIR.SamplingSVE","text":"SamplingSVE <: AbstractSVE\n\nSVE to SVD translation by sampling technique [1].\n\nMaps the singular value expansion (SVE) of a kernel kernel onto the singular value decomposition of a matrix A. This is achieved by choosing two sets of Gauss quadrature rules: (x, wx) and (y, wy) and approximating the integrals in the SVE equations by finite sums. This implies that the singular values of the SVE are well-approximated by the singular values of the following matrix:\n\nA[i, j] = √(wx[i]) * K(x[i], y[j]) * √(wy[j])\n\nand the values of the singular functions at the Gauss sampling points can be reconstructed from the singular vectors u and v as follows:\n\nu[l,i] ≈ √(wx[i]) u[l](x[i])\nv[l,j] ≈ √(wy[j]) u[l](y[j])\n\n[1] P. Hansen, Discrete Inverse Problems, Ch. 3.1\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.Statistics","page":"Private","title":"SparseIR.Statistics","text":"Statistics(zeta)\n\nAbstract type for quantum statistics (fermionic/bosonic/etc.)\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.accuracy","page":"Private","title":"SparseIR.accuracy","text":"accuracy(basis::AbstractBasis)\n\nAccuracy of the basis.\n\nUpper bound to the relative error of reprensenting a propagator with the given number of basis functions (number between 0 and 1).\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.canonicalize!-Tuple{Any, Any}","page":"Private","title":"SparseIR.canonicalize!","text":"canonicalize!(u, v)\n\nCanonicalize basis.\n\nEach SVD (u[l], v[l]) pair is unique only up to a global phase, which may differ from implementation to implementation and also platform. We fix that gauge by demanding u[l](1) > 0. This ensures a diffeomorphic connection to the Legendre polynomials as Λ → 0.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.choose_accuracy-Tuple{Any, Any, Any}","page":"Private","title":"SparseIR.choose_accuracy","text":"choose_accuracy(ε, Twork[, svd_strat])\n\nChoose work type and accuracy based on specs and defaults\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.compute_unl_inner-Tuple{SparseIR.PiecewiseLegendrePoly, Any}","page":"Private","title":"SparseIR.compute_unl_inner","text":"compute_unl_inner(poly, wn)\n\nCompute piecewise Legendre to Matsubara transform.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.conv_radius","page":"Private","title":"SparseIR.conv_radius","text":"conv_radius(kernel)\n\nConvergence radius of the Matsubara basis asymptotic model.\n\nFor improved relative numerical accuracy, the IR basis functions on the Matsubara axis uhat(basis, n) can be evaluated from an asymptotic expression for abs(n) > conv_radius. If isinf(conv_radius), then the asymptotics are unused (the default).\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.default_matsubara_sampling_points","page":"Private","title":"SparseIR.default_matsubara_sampling_points","text":"default_matsubara_sampling_points(basis::AbstractBasis; positive_only=false)\n\nDefault sampling points on the imaginary frequency axis.\n\nArguments\n\npositive_only::Bool: Only return non-negative frequencies. 
This is useful if the object to be fitted is symmetric in Matsubura frequency, ĝ(ω) == conj(ĝ(-ω)), or, equivalently, real in imaginary time.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.default_tau_sampling_points","page":"Private","title":"SparseIR.default_tau_sampling_points","text":"default_tau_sampling_points(basis::AbstractBasis)\n\nDefault sampling points on the imaginary time/x axis.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.deriv-Union{Tuple{SparseIR.PiecewiseLegendrePoly}, Tuple{n}, Tuple{SparseIR.PiecewiseLegendrePoly, Val{n}}} where n","page":"Private","title":"SparseIR.deriv","text":"deriv(poly[, ::Val{n}=Val(1)])\n\nGet polynomial for the nth derivative.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.eval_matrix","page":"Private","title":"SparseIR.eval_matrix","text":"eval_matrix(T, basis, x)\n\nReturn evaluation matrix from coefficients to sampling points. T <: AbstractSampling.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.find_extrema-Tuple{SparseIR.PiecewiseLegendreFT}","page":"Private","title":"SparseIR.find_extrema","text":"find_extrema(polyFT::PiecewiseLegendreFT; part=nothing, grid=DEFAULT_GRID)\n\nObtain extrema of Fourier-transformed polynomial.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.finite_temp_bases","page":"Private","title":"SparseIR.finite_temp_bases","text":"finite_temp_bases(β::Real, ωmax::Real, ε=nothing;\n kernel=LogisticKernel(β * ωmax), sve_result=SVEResult(kernel; ε))\n\nConstruct FiniteTempBasis objects for fermion and bosons using the same LogisticKernel instance.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.from_IR","page":"Private","title":"SparseIR.from_IR","text":"from_IR(dlr::DiscreteLehmannRepresentation, gl::AbstractArray, dims=1)\n\nFrom IR to DLR. gl`: Expansion coefficients in IR.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.get_symmetrized-Tuple{SparseIR.AbstractKernel, Any}","page":"Private","title":"SparseIR.get_symmetrized","text":"get_symmetrized(kernel, sign)\n\nConstruct a symmetrized version of kernel, i.e. kernel(x, y) + sign * kernel(x, -y).\n\nwarning: Beware!\nBy default, this returns a simple wrapper over the current instance which naively performs the sum. You may want to override this to avoid cancellation.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.get_tnl-Tuple{Any, Any}","page":"Private","title":"SparseIR.get_tnl","text":"get_tnl(l, w)\n\nFourier integral of the l-th Legendre polynomial::\n\nTₗ(ω) == ∫ dx exp(iωx) Pₗ(x)\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.giw-Tuple{Any, Integer}","page":"Private","title":"SparseIR.giw","text":"giw(polyFT, wn)\n\nReturn model Green's function for reduced frequencies\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.iscentrosymmetric","page":"Private","title":"SparseIR.iscentrosymmetric","text":"iscentrosymmetric(kernel)\n\nReturn true if kernel(x, y) == kernel(-x, -y) for all values of x and y in range. This allows the kernel to be block-diagonalized, speeding up the singular value expansion by a factor of 4. 
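The from_IR/to_IR pair documented above converts expansion coefficients between the IR basis and its discrete Lehmann representation. The following is only a hedged round-trip sketch: the DiscreteLehmannRepresentation(basis) constructor call, the one-pole test coefficients and the printed deviation are illustrative assumptions, not taken from these docstrings.

using SparseIR

# Sketch only: round trip IR -> DLR -> IR for a one-pole propagator.
# Assumes DiscreteLehmannRepresentation(basis) and the private helpers
# SparseIR.from_IR / SparseIR.to_IR behave as documented above.
β, ωmax, ε = 10.0, 8.0, 1e-6
basis = FiniteTempBasis{Fermionic}(β, ωmax, ε)
dlr = DiscreteLehmannRepresentation(basis)

ω₀ = 0.5                                 # pole position inside [-ωmax, ωmax]
gl = -basis.s .* basis.v(ω₀)             # IR coefficients G_l = -s_l V_l(ω₀)
g_dlr = SparseIR.from_IR(dlr, gl)        # IR -> DLR (fit onto the poles)
gl_back = SparseIR.to_IR(dlr, g_dlr)     # DLR -> IR
# The deviation should be of the order of the basis accuracy ε.
@show maximum(abs, gl - gl_back)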
Defaults to false.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.iswellconditioned-Tuple{SparseIR.AbstractBasis}","page":"Private","title":"SparseIR.iswellconditioned","text":"iswellconditioned(basis::AbstractBasis)\n\nReturns true if the sampling is expected to be well-conditioned.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.joinrules-Union{Tuple{AbstractArray{SparseIR.Rule{T}, 1}}, Tuple{T}} where T","page":"Private","title":"SparseIR.joinrules","text":"joinrules(rules)\n\nJoin multiple Gauss quadratures together.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.legder-Union{Tuple{AbstractMatrix{T}}, Tuple{T}, Tuple{AbstractMatrix{T}, Any}} where T","page":"Private","title":"SparseIR.legder","text":"legder\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.legendre-Union{Tuple{Any}, Tuple{T}, Tuple{Any, Type{T}}} where T","page":"Private","title":"SparseIR.legendre","text":"legendre(n[, T])\n\nGauss-Legendre quadrature with n points on [-1, 1].\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.legendre_collocation","page":"Private","title":"SparseIR.legendre_collocation","text":"legendre_collocation(rule, n=length(rule.x))\n\nGenerate collocation matrix from Gauss-Legendre rule.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.legvander-Union{Tuple{T}, Tuple{AbstractVector{T}, Integer}} where T","page":"Private","title":"SparseIR.legvander","text":"legvander(x, deg)\n\nPseudo-Vandermonde matrix of degree deg.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.matop!-Union{Tuple{N}, Tuple{T}, Tuple{S}, Tuple{AbstractArray{S, N}, Any, AbstractArray{T, N}, Any, Any}} where {S, T, N}","page":"Private","title":"SparseIR.matop!","text":"matop!(buffer, mat, arr::AbstractArray, op, dim)\n\nApply the operator op to the matrix mat and to the array arr along the first dimension (dim=1) or the last dimension (dim=N).\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.matop_along_dim!-Union{Tuple{N}, Tuple{T}, Tuple{Any, Any, AbstractArray{T, N}, Any, Any}} where {T, N}","page":"Private","title":"SparseIR.matop_along_dim!","text":"matop_along_dim!(buffer, mat, arr::AbstractArray, dim::Integer, op)\n\nApply the operator op to the matrix mat and to the array arr along the dimension dim, writing the result to buffer.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.matrices-Tuple{SparseIR.SamplingSVE}","page":"Private","title":"SparseIR.matrices","text":"matrices(sve::AbstractSVE)\n\nSVD problems underlying the SVE.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.matrix_from_gauss-Union{Tuple{T}, Tuple{Any, SparseIR.Rule{T}, SparseIR.Rule{T}}} where T","page":"Private","title":"SparseIR.matrix_from_gauss","text":"matrix_from_gauss(kernel, gauss_x, gauss_y)\n\nCompute matrix for kernel from Gauss rules.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.movedim-Union{Tuple{N}, Tuple{T}, Tuple{AbstractArray{T, N}, Pair}} where {T, N}","page":"Private","title":"SparseIR.movedim","text":"movedim(arr::AbstractArray, src => dst)\n\nMove arr's dimension at src to dst while keeping the order of the remaining dimensions unchanged.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.ngauss","page":"Private","title":"SparseIR.ngauss","text":"ngauss(hints)\n\nGauss-Legendre order to use to guarantee 
accuracy.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.nsvals-Tuple{SparseIR.SVEHintsLogistic}","page":"Private","title":"SparseIR.nsvals","text":"nsvals(hints)\n\nUpper bound for number of singular values.\n\nUpper bound on the number of singular values above the given threshold, i.e. where s[l] ≥ ε * first(s).\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.phase_stable-Tuple{Any, Integer}","page":"Private","title":"SparseIR.phase_stable","text":"phase_stable(poly, wn)\n\nPhase factor for the piecewise Legendre to Matsubara transform.\n\nCompute the following phase factor in a stable way:\n\nexp.(iπ/2 * wn * cumsum(Δx(poly)))\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.piecewise-Tuple{Any, Vector}","page":"Private","title":"SparseIR.piecewise","text":"piecewise(rule, edges)\n\nPiecewise quadrature with the same quadrature rule, but scaled.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.postprocess-Tuple{SparseIR.SamplingSVE, Any, Any, Any}","page":"Private","title":"SparseIR.postprocess","text":"postprocess(sve::AbstractSVE, u, s, v)\n\nConstruct the SVE result from the SVD.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.rescale-Tuple{FiniteTempBasis, Any}","page":"Private","title":"SparseIR.rescale","text":"rescale(basis::FiniteTempBasis, new_β)\n\nReturn a basis for different temperature.\n\nUses the same kernel with the same ε, but a different temperature. Note that this implies a different UV cutoff ωmax, since Λ == β * ωmax stays constant.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.reseat-Tuple{SparseIR.Rule, Any, Any}","page":"Private","title":"SparseIR.reseat","text":"reseat(rule, a, b)\n\nReseat quadrature rule to new domain.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.roots-Tuple{SparseIR.PiecewiseLegendrePoly}","page":"Private","title":"SparseIR.roots","text":"roots(poly)\n\nFind all roots of the piecewise polynomial poly.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.scale-Tuple{Any, Any}","page":"Private","title":"SparseIR.scale","text":"scale(rule, factor)\n\nScale weights by factor.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.segments_x-Union{Tuple{SparseIR.SVEHintsLogistic}, Tuple{T}, Tuple{SparseIR.SVEHintsLogistic, Type{T}}} where T","page":"Private","title":"SparseIR.segments_x","text":"segments_x(sve_hints::AbstractSVEHints[, T])\n\nSegments for piecewise polynomials on the x axis.\n\nList of segments on the x axis for the associated piecewise polynomial. Should reflect the approximate position of roots of a high-order singular function in x.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.segments_y-Union{Tuple{SparseIR.SVEHintsLogistic}, Tuple{T}, Tuple{SparseIR.SVEHintsLogistic, Type{T}}} where T","page":"Private","title":"SparseIR.segments_y","text":"segments_y(sve_hints::AbstractSVEHints[, T])\n\nSegments for piecewise polynomials on the y axis.\n\nList of segments on the y axis for the associated piecewise polynomial. 
Should reflect the approximate position of roots of a high-order singular function in y.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.shift_xmid-Tuple{Any, Any}","page":"Private","title":"SparseIR.shift_xmid","text":"shift_xmid(knots, Δx)\n\nReturn midpoint relative to the nearest integer plus a shift.\n\nReturn the midpoints xmid of the segments, as a pair (diff, shift), where shift is in (0, 1, -1) and diff is a float such that xmid == shift + diff to floating point accuracy.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.significance","page":"Private","title":"SparseIR.significance","text":"significance(basis::AbstractBasis)\n\nReturn vector σ, where 0 ≤ σ[i] ≤ 1 is the significance level of the i-th basis function. If ϵ is the desired accuracy to which to represent a propagator, then any basis function where σ[i] < ϵ can be neglected.\n\nFor the IR basis, we simply have that σ[i] = s[i] / first(s).\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.split-Tuple{Any, Real}","page":"Private","title":"SparseIR.split","text":"split(poly, x)\n\nSplit segment.\n\nFind the segment of poly's domain that covers x.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.statistics-Union{Tuple{SparseIR.AbstractBasis{S}}, Tuple{S}} where S<:SparseIR.Statistics","page":"Private","title":"SparseIR.statistics","text":"statistics(basis::AbstractBasis)\n\nQuantum statistic (Statistics instance, Fermionic() or Bosonic()).\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.sve_hints","page":"Private","title":"SparseIR.sve_hints","text":"sve_hints(kernel, ε)\n\nProvide discretisation hints for the SVE routines.\n\nAdvises the SVE routines of discretisation parameters suitable in transforming the (infinite) SVE into a (finite) SVD problem.\n\nSee also AbstractSVEHints.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.to_IR","page":"Private","title":"SparseIR.to_IR","text":"to_IR(dlr::DiscreteLehmannRepresentation, g_dlr::AbstractArray, dims=1)\n\nFrom DLR to IR. 
g_dlr`: Expansion coefficients in DLR.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.truncate-Tuple{Any, Any, Any}","page":"Private","title":"SparseIR.truncate","text":"truncate(u, s, v; rtol=0.0, lmax=typemax(Int))\n\nTruncate singular value expansion.\n\nArguments\n\n- `u`, `s`, `v`: Thin singular value expansion\n- `rtol`: Only singular values satisfying `s[l]/s[1] > rtol` are retained.\n- `lmax`: At most the `lmax` most significant singular values are retained.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.value-Tuple{MatsubaraFreq, Real}","page":"Private","title":"SparseIR.value","text":"Get value of the Matsubara frequency ω = n*π/β\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.valueim-Tuple{MatsubaraFreq, Real}","page":"Private","title":"SparseIR.valueim","text":"Get complex value of the Matsubara frequency iω = iπ/β * n\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.weight_func","page":"Private","title":"SparseIR.weight_func","text":"weight_func(kernel, statistics::Statistics)\n\nReturn the weight function for the given statistics.\n\nFermion: w(x) == 1\nBoson: w(y) == 1/tanh(Λ*y/2)\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.workarrlength-Tuple{SparseIR.AbstractSampling, AbstractArray}","page":"Private","title":"SparseIR.workarrlength","text":"workarrlength(smpl::AbstractSampling, al; dim=1)\n\nReturn length of workarr for fit!.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.xrange","page":"Private","title":"SparseIR.xrange","text":"xrange(kernel)\n\nReturn a tuple (x_mathrmmin x_mathrmmax) delimiting the range of allowed x values.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.ypower","page":"Private","title":"SparseIR.ypower","text":"ypower(kernel)\n\nPower with which the y coordinate scales.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.yrange","page":"Private","title":"SparseIR.yrange","text":"yrange(kernel)\n\nReturn a tuple (y_mathrmmin y_mathrmmax) delimiting the range of allowed y values.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.zeta-Tuple{MatsubaraFreq}","page":"Private","title":"SparseIR.zeta","text":"Get statistics ζ for Matsubara frequency ω = (2*m+ζ)*π/β\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.Λ","page":"Private","title":"SparseIR.Λ","text":"Λ(basis::AbstractBasis)\nlambda(basis::AbstractBasis)\n\nBasis cutoff parameter, Λ = β * ωmax, or None if not present\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.β-Tuple{SparseIR.AbstractBasis}","page":"Private","title":"SparseIR.β","text":"β(basis::AbstractBasis)\nbeta(basis::AbstractBasis)\n\nInverse temperature or nothing if unscaled basis.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.ωmax","page":"Private","title":"SparseIR.ωmax","text":"ωmax(basis::AbstractBasis)\nwmax(basis::AbstractBasis)\n\nReal frequency cutoff or nothing if unscaled basis.\n\n\n\n\n\n","category":"function"},{"location":"private/","page":"Private","title":"Private","text":"Modules = [SparseIR._LinAlg]\nPrivate = true\nPublic = true","category":"page"},{"location":"private/#SparseIR._LinAlg.givens_lmul-Union{Tuple{T}, Tuple{Tuple{T, T}, Any}} where T","page":"Private","title":"SparseIR._LinAlg.givens_lmul","text":"Apply Givens rotation to vector:\n\n [ a ] = [ c s ] [ x ]\n [ b ] [ -s c ] [ y ]\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.givens_params-Union{Tuple{T}, Tuple{T, 
T}} where T<:AbstractFloat","page":"Private","title":"SparseIR._LinAlg.givens_params","text":"Compute Givens rotation R matrix that satisfies:\n\n[ c s ] [ f ] [ r ]\n[ -s c ] [ g ] = [ 0 ]\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.rrqr!-Union{Tuple{AbstractMatrix{T}}, Tuple{T}} where T<:AbstractFloat","page":"Private","title":"SparseIR._LinAlg.rrqr!","text":"Truncated rank-revealing QR decomposition with full column pivoting.\n\nDecomposes a (m, n) matrix A into the product:\n\nA[:,piv] == Q * R\n\nwhere Q is an (m, k) isometric matrix, R is a (k, n) upper triangular matrix, piv is a permutation vector, and k is chosen such that the relative tolerance tol is met in the equality above.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.rrqr-Union{Tuple{AbstractMatrix{T}}, Tuple{T}} where T<:AbstractFloat","page":"Private","title":"SparseIR._LinAlg.rrqr","text":"Truncated rank-revealing QR decomposition with full column pivoting.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.svd2x2-Union{Tuple{T}, NTuple{4, T}} where T","page":"Private","title":"SparseIR._LinAlg.svd2x2","text":"Perform the SVD of an arbitrary two-by-two matrix:\n\n [ a11 a12 ] = [ cu -su ] [ smax 0 ] [ cv sv ]\n [ a21 a22 ] [ su cu ] [ 0 smin ] [ -sv cv ]\n\nNote that smax and smin can be negative.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.svd2x2-Union{Tuple{T}, Tuple{T, T, T}} where T<:AbstractFloat","page":"Private","title":"SparseIR._LinAlg.svd2x2","text":"Perform the SVD of upper triangular two-by-two matrix:\n\n [ f g ] = [ cu -su ] [ smax 0 ] [ cv sv ]\n [ 0 h ] [ su cu ] [ 0 smin ] [ -sv cv ]\n\nNote that smax and smin can be negative.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.svd_jacobi!-Union{Tuple{AbstractMatrix{T}}, Tuple{T}} where T","page":"Private","title":"SparseIR._LinAlg.svd_jacobi!","text":"Singular value decomposition using Jacobi rotations.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.svd_jacobi-Union{Tuple{AbstractMatrix{T}}, Tuple{T}} where T","page":"Private","title":"SparseIR._LinAlg.svd_jacobi","text":"Singular value decomposition using Jacobi rotations.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.truncate_qr_result-Union{Tuple{T}, Tuple{LinearAlgebra.QRPivoted{T, S, C} where {S<:AbstractMatrix{T}, C<:AbstractVector{T}}, Integer}} where T","page":"Private","title":"SparseIR._LinAlg.truncate_qr_result","text":"Truncate RRQR result low-rank\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.tsvd!-Union{Tuple{AbstractMatrix{T}}, Tuple{T}} where T<:AbstractFloat","page":"Private","title":"SparseIR._LinAlg.tsvd!","text":"Truncated singular value decomposition.\n\nDecomposes an (m, n) matrix A into the product:\n\nA == U * (s .* VT)\n\nwhere U is a (m, k) matrix with orthogonal columns, VT is a (k, n) matrix with orthogonal rows and s are the singular values, a set of k nonnegative numbers in non-ascending order. 
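As described just below, tsvd!/tsvd truncate in the sense that singular values below tol are discarded. The idea can be illustrated with the standard library alone; the following sketch deliberately uses LinearAlgebra.svd rather than SparseIR._LinAlg (whose exact return values are not spelled out here), and the matrix sizes and tolerance are made up for the illustration.

using LinearAlgebra

# Build a 20×12 matrix with known, rapidly decaying singular values.
n = 12
U₀ = Matrix(qr(randn(20, n)).Q)            # orthonormal columns
V₀ = Matrix(qr(randn(n, n)).Q)
A = U₀ * Diagonal(exp.(-(0:n-1))) * V₀'

F = svd(A)
rtol = 1e-3
k = count(≥(rtol * first(F.S)), F.S)       # keep singular values above rtol * s₁
A_k = F.U[:, 1:k] * Diagonal(F.S[1:k]) * F.Vt[1:k, :]
@show k                                    # 7 of the 12 singular values survive
@show opnorm(A - A_k) ≤ rtol * first(F.S)  # truncation error stays below the cutoff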
The SVD is truncated in the sense that singular values below tol are discarded.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.tsvd-Union{Tuple{AbstractMatrix{T}}, Tuple{T}} where T<:AbstractFloat","page":"Private","title":"SparseIR._LinAlg.tsvd","text":"Truncated singular value decomposition.\n\n\n\n\n\n","category":"method"},{"location":"public/","page":"Public","title":"Public","text":"CurrentModule = SparseIR","category":"page"},{"location":"public/#Public-names-index","page":"Public","title":"Public names index","text":"","category":"section"},{"location":"public/","page":"Public","title":"Public","text":"Modules = [SparseIR]\nPrivate = false\nPublic = true","category":"page"},{"location":"public/#SparseIR.SparseIR","page":"Public","title":"SparseIR.SparseIR","text":"Intermediate representation (IR) for many-body propagators.\n\n\n\n\n\n","category":"module"},{"location":"public/#SparseIR.AugmentedBasis","page":"Public","title":"SparseIR.AugmentedBasis","text":"AugmentedBasis <: AbstractBasis\n\nAugmented basis on the imaginary-time/frequency axis.\n\nGroups a set of additional functions, augmentations, with a given basis. The augmented functions then form the first basis functions, while the rest is provided by the regular basis, i.e.:\n\nu[l](x) == l < naug ? augmentations[l](x) : basis.u[l-naug](x),\n\nwhere naug = length(augmentations) is the number of added basis functions through augmentation. Similar expressions hold for Matsubara frequencies.\n\nAugmentation is useful in constructing bases for vertex-like quantities such as self-energies [wallerberger2021] and when constructing a two-point kernel that serves as a base for multi-point functions [shinaoka2018].\n\nwarning: Warning\nBases augmented with TauConst and TauLinear tend to be poorly conditioned. Care must be taken while fitting and compactness should be enforced if possible to regularize the problem.While vertex bases, i.e. bases augmented with MatsubaraConst, stay reasonably well-conditioned, it is still good practice to treat the Hartree–Fock term separately rather than including it in the basis, if possible.\n\nSee also: MatsubaraConst for vertex basis [wallerberger2021], TauConst, TauLinear for multi-point [shinaoka2018]\n\n[wallerberger2021]: https://doi.org/10.1103/PhysRevResearch.3.033168\n\n[shinaoka2018]: https://doi.org/10.1103/PhysRevB.97.205111\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.Bosonic","page":"Public","title":"SparseIR.Bosonic","text":"Bosonic statistics.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.DiscreteLehmannRepresentation","page":"Public","title":"SparseIR.DiscreteLehmannRepresentation","text":"DiscreteLehmannRepresentation <: AbstractBasis\n\nDiscrete Lehmann representation (DLR) with poles selected according to extrema of IR.\n\nThis class implements a variant of the discrete Lehmann representation (DLR) 1. Instead of a truncated singular value expansion of the analytic continuation kernel K like the IR, the discrete Lehmann representation is based on a \"sketching\" of K. The resulting basis is a linear combination of discrete set of poles on the real-frequency axis, continued to the imaginary-frequency axis:\n\n G(iv) == sum(a[i] / (iv - w[i]) for i in range(L))\n\nWarning The poles on the real-frequency axis selected for the DLR are based on a rank-revealing decomposition, which offers accuracy guarantees. 
Here, we instead select the pole locations based on the zeros of the IR basis functions on the real axis, which is a heuristic. We do not expect that difference to matter, but please don't blame the DLR authors if we were wrong :-)\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.Fermionic","page":"Public","title":"SparseIR.Fermionic","text":"Fermionic statistics.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.FiniteTempBasis","page":"Public","title":"SparseIR.FiniteTempBasis","text":"FiniteTempBasis <: AbstractBasis\n\nIntermediate representation (IR) basis for given temperature.\n\nFor a continuation kernel K from real frequencies, ω ∈ [-ωmax, ωmax], to imaginary time, τ ∈ [0, β], this type stores the truncated singular value expansion or IR basis:\n\nK(τ, ω) ≈ sum(u[l](τ) * s[l] * v[l](ω) for l in 1:L)\n\nThis basis is inferred from a reduced form by appropriate scaling of the variables.\n\nFields\n\nu::PiecewiseLegendrePolyVector: Set of IR basis functions on the imaginary time (tau) axis. These functions are stored as piecewise Legendre polynomials.\nTo obtain the value of all basis functions at a point or a array of points x, you can call the function u(x). To obtain a single basis function, a slice or a subset l, you can use u[l].\nuhat::PiecewiseLegendreFT: Set of IR basis functions on the Matsubara frequency (wn) axis. These objects are stored as a set of Bessel functions.\nTo obtain the value of all basis functions at a Matsubara frequency or a array of points wn, you can call the function uhat(wn). Note that we expect reduced frequencies, which are simply even/odd numbers for bosonic/fermionic objects. To obtain a single basis function, a slice or a subset l, you can use uhat[l].\ns: Vector of singular values of the continuation kernel\nv::PiecewiseLegendrePoly: Set of IR basis functions on the real frequency (w) axis. These functions are stored as piecewise Legendre polynomials.\nTo obtain the value of all basis functions at a point or a array of points w, you can call the function v(w). 
To obtain a single basis function, a slice or a subset l, you can use v[l].\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.FiniteTempBasis-Union{Tuple{S}, Tuple{Real, Real}, Tuple{Real, Real, Any}} where S","page":"Public","title":"SparseIR.FiniteTempBasis","text":"FiniteTempBasis{S}(β, ωmax, ε=nothing; max_size=nothing, args...)\n\nConstruct a finite temperature basis suitable for the given S (Fermionic or Bosonic) and cutoffs β and ωmax.\n\n\n\n\n\n","category":"method"},{"location":"public/#SparseIR.FiniteTempBasisSet","page":"Public","title":"SparseIR.FiniteTempBasisSet","text":"FiniteTempBasisSet\n\nType for holding IR bases and sparse-sampling objects.\n\nAn object of this type holds IR bases for fermions and bosons and associated sparse-sampling objects.\n\nFields\n\nbasis_f::FiniteTempBasis: Fermion basis\nbasis_b::FiniteTempBasis: Boson basis\ntau::Vector{Float64}: Sampling points in the imaginary-time domain\nwn_f::Vector{Int}: Sampling fermionic frequencies\nwn_b::Vector{Int}: Sampling bosonic frequencies\nsmpltauf::TauSampling: Sparse sampling for tau & fermion\nsmpltaub::TauSampling: Sparse sampling for tau & boson\nsmplwnf::MatsubaraSampling: Sparse sampling for Matsubara frequency & fermion\nsmplwnb::MatsubaraSampling: Sparse sampling for Matsubara frequency & boson\nsve_result::Tuple{PiecewiseLegendrePoly,Vector{Float64},PiecewiseLegendrePoly}: Results of SVE\n\nGetters\n\nbeta::Float64: Inverse temperature\nωmax::Float64: Cut-off frequency\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.LogisticKernel","page":"Public","title":"SparseIR.LogisticKernel","text":"LogisticKernel <: AbstractKernel\n\nFermionic/bosonic analytical continuation kernel.\n\nIn dimensionless variables x = 2 τβ - 1, y = β ωΛ, the integral kernel is a function on -1 1 -1 1:\n\n K(x y) = frace^-Λ y (x + 1) 21 + e^-Λ y\n\nLogisticKernel is a fermionic analytic continuation kernel. Nevertheless, one can model the τ dependence of a bosonic correlation function as follows:\n\n frace^-Λ y (x + 1) 21 - e^-Λ y ρ(y) dy = K(x y) ρ(y) dy\n\nwith\n\n ρ(y) = w(y) ρ(y)\n\nwhere the weight function is given by\n\n w(y) = frac1tanh(Λ y2)\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.MatsubaraConst","page":"Public","title":"SparseIR.MatsubaraConst","text":"MatsubaraConst <: AbstractAugmentation\n\nConstant in Matsubara, undefined in imaginary time.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.MatsubaraFreq","page":"Public","title":"SparseIR.MatsubaraFreq","text":"MatsubaraFreq(n)\n\nPrefactor n of the Matsubara frequency ω = n*π/β\n\nStruct representing the Matsubara frequency ω entering the Fourier transform of a propagator G(τ) on imaginary time τ to its Matsubara equivalent Ĝ(iω) on the imaginary-frequency axis:\n\n β\nĜ(iω) = ∫ dτ exp(iωτ) G(τ) with ω = n π/β,\n 0\n\nwhere β is inverse temperature and by convention we include the imaginary unit in the frequency argument, i.e, Ĝ(iω). The frequencies depend on the statistics of the propagator, i.e., we have that:\n\nG(τ - β) = ± G(τ)\n\nwhere + is for bosons and - is for fermions. 
The frequencies are restricted accordingly.\n\nBosonic frequency (S == Bosonic): n even (periodic in β)\nFermionic frequency (S == Fermionic): n odd (anti-periodic in β)\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.MatsubaraSampling","page":"Public","title":"SparseIR.MatsubaraSampling","text":"MatsubaraSampling <: AbstractSampling\n\nSparse sampling in Matsubara frequencies.\n\nAllows the transformation between the IR basis and a set of sampling points in (scaled/unscaled) imaginary frequencies.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.MatsubaraSampling-Tuple{SparseIR.AbstractBasis}","page":"Public","title":"SparseIR.MatsubaraSampling","text":"MatsubaraSampling(basis; positive_only=false,\n sampling_points=default_matsubara_sampling_points(basis; positive_only),\n factorize=true)\n\nConstruct a MatsubaraSampling object. If not given, the sampling_points are chosen as the (discrete) extrema of the highest-order basis function in Matsubara. This turns out to be close to optimal with respect to conditioning for this size (within a few percent).\n\nBy setting positive_only=true, one assumes that functions to be fitted are symmetric in Matsubara frequency, i.e.:\n\n G(iν) = conj(G(-iν))\n\nor equivalently, that they are purely real in imaginary time. In this case, sparse sampling is performed over non-negative frequencies only, cutting away half of the necessary sampling space. factorize controls whether the SVD of the fitting matrix is computed.\n\n\n\n\n\n","category":"method"},{"location":"public/#SparseIR.RegularizedBoseKernel","page":"Public","title":"SparseIR.RegularizedBoseKernel","text":"RegularizedBoseKernel <: AbstractKernel\n\nRegularized bosonic analytical continuation kernel.\n\nIn dimensionless variables x = 2τ/β - 1, y = βω/Λ, the bosonic integral kernel is a function on [-1, 1] × [-1, 1]:\n\n K(x, y) = y exp(-Λ y (x + 1) / 2) / (exp(-Λ y) - 1)\n\nCare has to be taken in evaluating this expression around y = 0.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.TauConst","page":"Public","title":"SparseIR.TauConst","text":"TauConst <: AbstractAugmentation\n\nConstant in imaginary time/discrete delta in frequency.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.TauLinear","page":"Public","title":"SparseIR.TauLinear","text":"TauLinear <: AbstractAugmentation\n\nLinear function in imaginary time, antisymmetric around β/2.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.TauSampling","page":"Public","title":"SparseIR.TauSampling","text":"TauSampling <: AbstractSampling\n\nSparse sampling in imaginary time.\n\nAllows the transformation between the IR basis and a set of sampling points in (scaled/unscaled) imaginary time.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.TauSampling-Tuple{SparseIR.AbstractBasis}","page":"Public","title":"SparseIR.TauSampling","text":"TauSampling(basis; sampling_points=default_tau_sampling_points(basis), factorize=true)\n\nConstruct a TauSampling object. If not given, the sampling_points are chosen as the extrema of the highest-order basis function in imaginary time. This turns out to be close to optimal with respect to conditioning for this size (within a few percent). 
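A minimal usage sketch (an illustration added here, not part of the upstream docstring; the one-pole coefficients are only a convenient test vector):

using SparseIR

basis = FiniteTempBasis{Fermionic}(10.0, 8.0, 1e-6)
sτ = TauSampling(basis)                  # default sparse τ sampling points

Gl = -basis.s .* basis.v(0.5)            # IR coefficients of a single-pole propagator
Gτ = evaluate(sτ, Gl)                    # values on the sparse τ grid
@show fit(sτ, Gτ) ≈ Gl                   # the round trip recovers the coefficients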
factorize controls whether the SVD decomposition is computed.\n\n\n\n\n\n","category":"method"},{"location":"public/#SparseIR.evaluate!-Union{Tuple{N}, Tuple{T}, Tuple{S}, Tuple{AbstractArray{T, N}, SparseIR.AbstractSampling, AbstractArray{S, N}}} where {S, T, N}","page":"Public","title":"SparseIR.evaluate!","text":"evaluate!(buffer::AbstractArray{T,N}, sampling, al; dim=1) where {T,N}\n\nLike evaluate, but write the result to buffer. Please use dim = 1 or N to avoid allocating large temporary arrays internally.\n\n\n\n\n\n","category":"method"},{"location":"public/#SparseIR.evaluate-Union{Tuple{N}, Tuple{T}, Tuple{Tmat}, Tuple{S}, Tuple{SparseIR.AbstractSampling{S, Tmat}, AbstractArray{T, N}}} where {S, Tmat, T, N}","page":"Public","title":"SparseIR.evaluate","text":"evaluate(sampling, al; dim=1)\n\nEvaluate the basis coefficients al at the sparse sampling points.\n\n\n\n\n\n","category":"method"},{"location":"public/#SparseIR.fit!-Union{Tuple{N}, Tuple{T}, Tuple{S}, Tuple{Array{S, N}, SparseIR.AbstractSampling, Array{T, N}}} where {S, T, N}","page":"Public","title":"SparseIR.fit!","text":"fit!(buffer::Array{S,N}, smpl::AbstractSampling, al::Array{T,N}; \n dim=1, workarr::Vector{S}) where {S,T,N}\n\nLike fit, but write the result to buffer. Use dim = 1 or dim = N to avoid allocating large temporary arrays internally. The length of workarr cannot be smaller than SparseIR.workarrlength(smpl, al).\n\n\n\n\n\n","category":"method"},{"location":"public/#SparseIR.fit-Union{Tuple{N}, Tuple{T}, Tuple{Tmat}, Tuple{S}, Tuple{SparseIR.AbstractSampling{S, Tmat}, AbstractArray{T, N}}} where {S, Tmat, T, N}","page":"Public","title":"SparseIR.fit","text":"fit(sampling, al::AbstractArray{T,N}; dim=1)\n\nFit basis coefficients from the sparse sampling points Please use dim = 1 or N to avoid allocating large temporary arrays internally.\n\n\n\n\n\n","category":"method"},{"location":"public/#SparseIR.overlap-Union{Tuple{F}, Tuple{SparseIR.PiecewiseLegendrePoly, F}} where F","page":"Public","title":"SparseIR.overlap","text":"overlap(poly::PiecewiseLegendrePoly, f; \n rtol=eps(T), return_error=false, maxevals=10^4, points=T[])\n\nEvaluate overlap integral of poly with arbitrary function f.\n\nGiven the function f, evaluate the integral\n\n∫ dx f(x) poly(x)\n\nusing adaptive Gauss-Legendre quadrature.\n\npoints is a sequence of break points in the integration interval where local difficulties of the integrand may occur (e.g. singularities, discontinuities).\n\n\n\n\n\n","category":"method"},{"location":"","page":"Home","title":"Home","text":"CurrentModule = SparseIR","category":"page"},{"location":"#SparseIR.jl","page":"Home","title":"SparseIR.jl","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"Documentation for SparseIR.jl.","category":"page"},{"location":"","page":"Home","title":"Home","text":"There is a guide available which details SparseIR.jl's inner workings by means of a worked example.","category":"page"},{"location":"","page":"Home","title":"Home","text":"For listings of all documented names, see Public names index and the Private names index.","category":"page"}] +[{"location":"guide/#guide","page":"Guide","title":"Introduction","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"We present SparseIR.jl, a Julia library for constructing and working with the intermediate representation of correlation functions [1–4]. 
The intermediate representation (IR) takes the matrix kernel transforming propagators between the real-frequency axis and the imaginary-time axis and performs a singular value expansion (SVE) on it. This decomposes the matrix kernel into a set of singular values as well as two sets of functions. One of those lives on the real-frequency axis and one on the imaginary-time axis. Expressing a propagator in terms of either basis–by an ordinary least squares fit–then allows us to easily transition between them. In combination with a prescription for constructing sparse sets of sampling points on each axis, we have a method for optimally compressing propagators.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"SparseIR.jl implements the intermediate representation, providing on-the-fly computation of basis functions and singular values accurate to full precision along with routines for sparse sampling. It is further fully unit tested, featuring near-complete code coverage. Here, we will explain its inner structure by means of an example use case. In preparing this document, SparseIR.jl version 1.0.18 and Julia version 1.11.1 were used.","category":"page"},{"location":"guide/#Problem-statement","page":"Guide","title":"Problem statement","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"We take a problem to be solved from the sparse-ir paper [4].","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Let us perform self-consistent second-order perturbation theory for the single impurity Anderson model at finite temperature. Its Hamiltonian is given by H = U c^dagger_uparrow c^dagger_downarrow c_downarrow c_uparrow + sum_psigma big(V_psigma f_psigma^dagger c_sigma + V_psigma^* c_sigma^dagger f_psigmabig) + sum_psigma epsilon_p f_psigma^dagger f_psigmawhere U is the electron interaction strength, c_sigma annihilates an electron on the impurity, f_psigma annihilates an electron in the bath, dagger denotes the Hermitian conjugate, pinmathbb R is bath momentum, and sigmainuparrow downarrow the spin. 
The hybridization strength V_psigma and bath energies epsilon_p are chosen such that the non-interacting density of states is semi-elliptic with a half-bandwidth of one, rho_0(omega) = frac2pisqrt1-omega^2, U=12, beta=10, [...]","category":"page"},{"location":"guide/#outline","page":"Guide","title":"Outline","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"To provide an overview, we first give the full code used to solve the problem with SparseIR.jl.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"using SparseIR\n\nβ = 10.0; ωmax = 8.0; ε = 1e-6;\n\n# Construct the IR basis and sparse sampling for fermionic propagators\nbasis = FiniteTempBasis{Fermionic}(β, ωmax, ε)\nsτ = TauSampling(basis)\nsiω = MatsubaraSampling(basis; positive_only=true)\n\n# Solve the single impurity Anderson model coupled to a bath with a\n# semicircular density of states with unit half bandwidth.\nU = 1.2\nρ₀(ω) = 2/π * √(1 - clamp(ω, -1, +1)^2)\n\n# Compute the IR basis coefficients for the non-interacting propagator\nρ₀l = overlap.(basis.v, ρ₀)\nG₀l = -basis.s .* ρ₀l\n\n# Self-consistency loop: alternate between second-order expression for the\n# self-energy and the Dyson equation until convergence.\nGl = copy(G₀l)\nΣl = zero(Gl)\nGl_prev = zero(Gl)\nG₀iω = evaluate(siω, G₀l)\nwhile !isapprox(Gl, Gl_prev, rtol=ε)\n Gl_prev = copy(Gl)\n Gτ = evaluate(sτ, Gl)\n Στ = @. U^2 * Gτ^3\n Σl = fit(sτ, Στ)\n Σiω = evaluate(siω, Σl)\n Giω = @. (G₀iω^-1 - Σiω)^-1\n Gl = fit(siω, Giω)\nend","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Note that this script as presented is optimized for readability instead of performance; in practice, you would want to make minor adjustments to ensure maximum type inferrability and full type stability, among other things putting the code in a function instead of executing in global scope. Such an performance-optimized version is provided in Appendix: Optimized script. The following is a detailed explanation of what happens here under the hood and why.","category":"page"},{"location":"guide/#Treatment","page":"Guide","title":"Treatment","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"If we take the second-order expression for the self-energy, which at half filling is simply ","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" Sigma(tau) = U^2 pqtyG(tau)^3","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"and the Dyson equation","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" hat G(mathrmiomega) = pqtypqtyhat G_0(mathrmiomega)^-1 - hatSigma(mathrmiomega)^-1","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"we have a system of two coupled equations. The first one is diagonal in tau and the second is diagonal in mathrmiomega, so we need a way of converting efficiently between these two axes.","category":"page"},{"location":"guide/#Basis-construction","page":"Guide","title":"Basis construction","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"We first import SparseIR and construct an appropriate basis. To do so, we must first choose an appropriate UV frequency cutoff omega_mathrmmax, representing the maximum bandwidth our basis can capture. The non-interacting density of states in our problem is semi-elliptic with half-bandwidth 1. 
Once we introduce interactions via the interaction strength U, this band splits into the lower and the upper Hubbard bands, centered around omega = pm U2 respectively. So the bandwidth should be around 32 at a minimum, but we choose more than twice that with omega_mathrmmax = 8 to be safe.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"julia> using SparseIR\n\njulia> β = 10.0; ωmax = 8.0; ε = 1e-6;\n\njulia> basis = FiniteTempBasis{Fermionic}(β, ωmax, ε)\n20-element FiniteTempBasis{Fermionic} with β = 10.0, ωmax = 8.0 and singular values:\n 1.4409730317545617\n 1.2153954454510802\n 0.7652662478347486\n 0.49740673945822533\n 0.288562095623106\n 0.1639819552743817\n 0.08901271087151318\n 0.046837974354297436\n 0.023857653233506308\n 0.01179373309602762\n 0.005662400021411787\n 0.0026427291749051072\n 0.0011996720525663963\n 0.0005299554043095754\n 0.00022790287514550545\n 9.544046906619884e-5\n 3.8931895383167936e-5\n 1.5472919567017398e-5\n 5.992753725069063e-6\n 2.2623276239584257e-6","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"There is quite a lot happening behind the scenes in this first innocuous-looking statement, so we will break it down:","category":"page"},{"location":"guide/#Kernel","page":"Guide","title":"Kernel","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"Consider a propagator/Green's function defined on the imaginary-time axis","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" G(tau) equiv -evT_tau A(tau) B(0)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"and the associated spectral function in real frequency rho(omega) = -(1pi) mathrmImG(omega). These are related via","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" G(tau) = -int_-omega_mathrmmax^+omega_mathrmmax ddomega tilde K(tau omega) rho(omega)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"with the integral kernel","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" tilde K(tau omega) = frace^-tauomegae^-betaomega + 1","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"mediating between them. If we perform an SVE on this kernel, yielding the decomposition","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" tilde K(tau omega) = sum_ell=1^infty U_ell(tau) S_ell V_ell(omega)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"with the U_ells and V_ells each forming an orthonormal system, we can write","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" G(tau) = sum_ell=1^infty U_ell(tau) G_ell = sum_ell=1^L U_ell(tau) G_ell + epsilon_L+1(tau)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"with expansion coefficients given by","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" G_ell = -int_-omega_mathrmmax^+omega_mathrmmax ddomega S_ell V_ell(omega) rho(omega)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"The singular values decay at least exponentially with log S_ell = order-ell log(betaomega_mathrmmax). Hence, the error epsilon_L+1(tau) we incur by representing the Green's function in this way and cutting off the sum after L terms does, too. 
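This decay is easy to inspect numerically. The following check is not part of the original guide; it relies only on basis.s and on the truncation rule S_ℓ/S_1 ≥ ε quoted later in this section.

using SparseIR

β, ωmax, ε = 10.0, 8.0, 1e-6
basis = FiniteTempBasis{Fermionic}(β, ωmax, ε)

σ = basis.s ./ first(basis.s)        # relative significance of each basis function
@show length(basis.s)                # 20 for these parameters, as shown above
@show all(≥(ε), σ)                   # every retained singular value lies above the cutoff
@show log10.(σ)                      # roughly linear in ℓ ⇒ (at least) exponential decay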
If we know its expansion coefficients, we can easily compute the propagator's Fourier transform by ","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" hat G(mathrmiomega) = int_0^beta ddtau e^mathrmiomegatau G(tau) approx sum_ell=1^L hat U_ell(mathrmiomega) G_ell","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"where mathrmiomega = (2n+1)mathrmipibeta with n in mathbb Z is a Matsubara frequency. The representation in terms of these expansion coefficients is called the intermediate representation, which SparseIR.jl is concerned with.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"To standardize our variables, we define x in -1+1 and y in -1+1 by","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" tau = beta (x+1)2 qand omega = omega_mathrmmax y","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"so that the kernel can be written","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" K(x y) = frace^-Lambda y (x + 1) 2e^-Lambda y + 1","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"with Lambda = betaomega_mathrmmax = 80. This is represented by the object LogisticKernel(80.0), which FiniteTempBasis uses internally. (Image: Logistic kernel used to construct the basis in our problem treatment K(x,y).)","category":"page"},{"location":"guide/#Singular-value-expansion","page":"Guide","title":"Singular value expansion","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"Central is the singular value expansion [5], which is handled by the function SVEResult: Its purpose is to construct the decomposition","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" K(x y) approx sum_ell = 0^L U_ell(x) S_ell V_ell(y)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"where U_ell(x) and V_ell(y) are called K's left and right singular functions respectively and S_ell are its singular values. By construction, the singular functions form an orthonormal basis, i.e.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" int ddx U_ell(x) U_ell(x) = delta_ellell = int ddy V_ell(y) V_ell(y)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"and thus above equation is equivalent to a pair of eigenvalue equations","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"beginaligned\n S_ell U_ell(x) = int ddy K(x y) V_ell(y) \n S_ell V_ell(y) = int ddx K(x y) U_ell(x)\nendaligned","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Here and in what follows, unless otherwise indicated, integrals are taken to be over the interval -1+1 (because we rescaled to x and y variables).","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"The function first calls the choose_accuracy helper and thereby sets the appropriate working precision. Because we did not specify a working accuracy varepsilon^2, it chooses machine precision eps(Float64), i.e. varepsilon approx 22 times 10^-16 and working type Float64x2 - a 128 bits floating point type provided by the MultiFloats.jl package - because in computing the SVD we incur a precision loss of about half our input bits. 
This leaves us with full double accuracy results only if we use quad precision during the computation.\nThen - by calling out to the CentrosymmSVE constructor - a support grid x_i times y_j for the kernel to be evaluated later on is built. Along with these support points, weights w_i and z_j are computed. These points and weights consist of repeated scaled Gauss integration rules, such that\n int ddx f(x) approx sum_i f(x_i) w_i\n quadtextandquad\n int ddy g(y) approx sum_j g(y_j) z_j\nTo get an idea regarding the distribution of these sampling points, refer to Fig. 2.2, which shows x_i times y_j for Lambda = 80: (Image: Sampling point distribution resulting from a Cartesian product of Gauss integration rules.)\nNote:\nThe points do not cover -1 1 times -1 1 but only 0 1 times 0 1. This is actually a special case as we exploit the kernel's centrosymmetry, i.e. K(x y) = K(-x -y). It is straightforward to show that the left/right singular vectors then can be chosen as either odd or even functions.\nConsequentially, we actually sample from a reduced kernel K^mathrmred_pm on 0 1 times 0 1 that is given as either\n K^mathrmred_pm(x y) = K(x y) pm K(x -y)\ngaining a 4-fold speedup (because we take only a quarter of the domain) in constructing the SVE. The full singular functions can be reconstructed by (anti-)symmetrically continuing them to the negative axis. (Image: Reduced kernels, as a function of x and y, parameterizing imaginary time and real frequency, respectively. Compare their [0,1] × [0,1] subregions with the sampling point distribution plot above.)\nUsing the integration rules allows us to approximate\nbeginaligned\n S_ell U_ell(x_i) approx sum_j K(x_i y_j) V_ell(y_j) z_j forall i \n S_ell V_ell(y_j) approx sum_i K(x_i y_j) U_ell(x_i) w_i forall j\nendaligned\nwhich we now multiply by sqrtw_i and sqrtz_j respectively to normalize our basis functions, yielding\nbeginaligned\n S_ell sqrtw_i U_ell(x_i) approx sum_j sqrtw_i K(x_i y_j) sqrtz_j sqrtz_j V_ell(y_j) \n S_ell sqrtz_j V_ell(y_j) approx sum_i sqrtw_i K(x_i y_j) sqrtz_j sqrtw_i U_ell(x_i)\nendaligned\nIf we now define vectors vec u_ell, vec v_ell and a matrix K with entries u_ell i equiv sqrtw_i U_ell(x_i), v_ell j equiv sqrtz_j V_ell(y_j) and K_ij equiv sqrtw_i K(x_i y_j) sqrtz_j, we obtain\nbeginaligned\n S_ell u_ell i approx sum_j K_ij v_ell j \n S_ell v_ell j approx sum_i K_ij u_ell i\nendaligned\nor\nbeginaligned\n S_ell vec u_ell approx K^phantommathrmT vec v_ell \n S_ell vec v_ell approx K^mathrmT vec u_ell\nendaligned\nTogether with the property vec u_ell^mathrmT vec u_ell approx delta_ellell approx vec v_ell^mathrmT vec v_ell we have successfully translated the original SVE problem into an SVD, because\n K = sum_ell S_ell vec u_ell vec v_ell^mathrmT\nThe next step is calling the matrices function which computes the matrix K derived in the previous step.\nnote: Note\nThe function is named in the plural because in the centrosymmetric case it actually returns two matrices K_+ and K_-, one for the even and one for the odd kernel. The SVDs of these matrices are later concatenated, so for simplicity, we will refer to K from here on out.\ninfo: Info\nSpecial care is taken here to avoid FP-arithmetic cancellation around x = -1 and x = +1.\n(Image: Kernel matrices, rotated 90 degrees counterclockwise to make the connection with the (subregion [0,1] × [0,1] of the) previous figure more obvious. Thus we can see how the choice of sampling points has magnified and brought to the matrices' centers the regions of interest. 
Furthermore, elements with absolute values smaller than 10\\% of the maximum have been omitted to emphasize the structure; this should however not be taken to mean that there is any sparsity to speak of we could exploit in the next step.)\nTake the truncated singular value decomposition (trSVD) of K, or rather, of K_+ and K_-. We use here a custom trSVD routine written by Markus Wallerberger which combines a homemade rank-revealing QR decomposition with GenericLinearAlgebra.svd!. This is necessary because there is currently no trSVD for quad precision types available.\nVia the function truncate, we throw away superfluous terms in our expansion. More specifically, we choose the basis size L such that S_ell S_0 varepsilon for all ell leq L. Here varepsilon is our selected precision, in our case it's equal to the double precision machine epsilon, 2^-52 approx 222 times 10^-16.\nFinally, we need a postprocessing step implemented in postprocess which performs some technical manipulation to turn the SVD result into the SVE we actually want. The functions are represented as piecewise Legendre polynomials, which model a function on the interval x_mathrmmin x_mathrmmax as a set of segments on the intervals a_i a_i+1, where on each interval the function is expanded in scaled Legendre polynomials. The interval endpoints are chosen such that they reflect the approximate position of roots of a high-order singular function in x.","category":"page"},{"location":"guide/#Finishing-touches","page":"Guide","title":"Finishing touches","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"The difficult part of constructing the FiniteTempBasis is now over. Next we truncate the left and right singular functions by discarding U_ell and V_ell with indices ell L to match the S_ell. The functions are now scaled to imaginary-time and frequency according to","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" tau = beta2 (x + 1) qand omega = omega_mathrmmax y","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"This means the singular values need to be multiplied by sqrt(beta2)omega_mathrmmax, because K(xy) sqrtdd xdd y = K(tauomega) sqrtddtauddomega. We also add to our basis hatU_ell(mathrmiomega), the Fourier transforms of the left singular functions, defined on the fermionic Matsubara frequencies mathrmiomega = mathrmi(2n+1)betapi (with integer n). This is particularly simple, because the Legendre polynomials' Fourier transforms are known analytically and given by spherical Bessel functions, for which we can rely on Bessels.jl [6].","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"We can now take a look at our basis functions to get a feel for them:","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"(Image: First 6 left singular basis functions on the imaginary-time axis.)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"(Image: First 6 right singular basis functions on the frequency axis.)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Looking back at the image of the kernel K(xy) we can imagine how it is reconstructed by multiplying and summing (including a factor S_ell) U_ell(tau) and V_ell(omega). An important property of the left singular functions is interlacing, i.e. U_ell interlaces U_ell+1. 
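Interlacing is spelled out precisely in the next sentence; as a quick numerical spot check, here is a sketch (not from the original guide) that uses the private helper SparseIR.roots documented in the reference section and assumes it returns the roots within the functions' domain:

using SparseIR

basis = FiniteTempBasis{Fermionic}(10.0, 8.0, 1e-6)
r_prev = sort(SparseIR.roots(basis.u[end-1]))   # roots of the second-highest U_ℓ on [0, β]
r_last = sort(SparseIR.roots(basis.u[end]))     # roots of the highest U_ℓ, one more than above
@show length(r_prev), length(r_last)
interlaced = all(i -> r_last[i] ≤ r_prev[i] ≤ r_last[i+1],
                 1:min(length(r_prev), length(r_last) - 1))
@show interlaced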
A function g with roots alpha_n-1 leq ldots leq alpha_1 interlaces a function f with roots beta_n leq ldots leq beta_1 if","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" beta_n leq alpha_n-1 leq beta_n-1 leq ldots leq beta_1","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"We will use this property for constructing our sparse sampling set.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"(Image: First 8 Fourier transformed basis functions on the Matsubara frequency axis.)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"As for the Matsubara basis functions, we plot only the non-zero components, i.e. mathrmImhat U_ell(mathrmiomega) with odd ell and mathrmRehat U_ell(mathrmiomega) with even ell.","category":"page"},{"location":"guide/#Constructing-the-samplers","page":"Guide","title":"Constructing the samplers","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"With our basis complete, we construct sparse sampling objects for fermionic propagators on the imaginary-time axis and on the Matsubara frequency axis.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"julia> sτ = TauSampling(basis);\n\njulia> show(sampling_points(sτ))\n[0.018885255323127792, 0.10059312563754808, 0.25218900406693556, 0.4822117319309194, 0.8042299148252774, 1.2376463941125326, 1.8067997157763205, 2.535059399842931, 3.4296355795122793, 4.45886851573216, 5.541131484267839, 6.570364420487721, 7.464940600157068, 8.19320028422368, 8.762353605887466, 9.195770085174722, 9.51778826806908, 9.747810995933065, 9.899406874362452, 9.981114744676873]\n\njulia> siω = MatsubaraSampling(basis; positive_only=true);\n\njulia> show(sampling_points(siω))\nFermionicFreq[FermionicFreq(1), FermionicFreq(3), FermionicFreq(5), FermionicFreq(7), FermionicFreq(9), FermionicFreq(11), FermionicFreq(17), FermionicFreq(27), FermionicFreq(49), FermionicFreq(153)]","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Both functions first determine a suitable set of sampling points on their respective axis. In the case of TauSampling, the sampling points tau_i are chosen as the extrema of the highest-order basis function in imaginary-time; this works because U_ell has exactly ell roots. This turns out to be close to optimal with respect to conditioning for this size (within a few percent). Similarly, MatsubaraSampling chooses sampling points mathrmiomega_n as the (discrete) extrema of the highest-order basis function in Matsubara. By setting positive_only=true, one assumes that functions to be fitted are symmetric in Matsubara frequency, i.e.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" hat G(mathrmiomega) = qty(hat G(-mathrmiomega))^*","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"In this case, sparse sampling is performed over non-negative frequencies only, cutting away half of the necessary sampling space, so we get only 10 sampling points instead of the 20 in the imaginary-time case.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Then, both compute design matrices by E^tau_iell = u_ell(tau_i) and E^omega_nell = hatu_ell(iomega_n) as well as their SVDs. 
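The design matrix can be built explicitly to see that evaluation is nothing but a matrix-vector product. This sketch uses the private SparseIR.eval_matrix helper from the reference section (its exact call signature is an assumption here), plus LinearAlgebra.cond to peek at the conditioning discussed next.

using SparseIR, LinearAlgebra

basis = FiniteTempBasis{Fermionic}(10.0, 8.0, 1e-6)
sτ = TauSampling(basis)
τ = sampling_points(sτ)

Eτ = SparseIR.eval_matrix(TauSampling, basis, τ)   # E[i, l] = U_l(τ_i), coefficients -> values
Gl = -basis.s .* basis.v(0.5)                      # some IR coefficients to test with
@show Eτ * Gl ≈ evaluate(sτ, Gl)                   # evaluation is just E * G
@show cond(Eτ)                                     # well-conditioned for the default points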
We are now able to get the IR basis coefficients of a function that is known on the imaginary-time sampling points by solving the fitting problem","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" G_ell = mathrmargmin_G_ell sum_tau_i normG(tau_i) - sum_ell E^tau_iell G_ell^2","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"which can be done efficiently once the SVD is known. The same can be done on the Matsubara axis","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" G_ell = mathrmargmin_G_ell sum_mathrmiomega_n normhatG(mathrmiomega_n) - sum_ell E^omega_nell G_ell^2","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"and taken together we now have a way of moving efficiently between both. In solving these problems, we need to take their conditioning into consideration; in the case of the Matsubara axis, the problem is somewhat worse conditioned than on the imaginary-time axis due to its discrete nature. We augment it therefore with 4 additional sampling frequencies.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"(Image: Scaling behavior of the fitting problem conditioning.)","category":"page"},{"location":"guide/#Initializing-the-iteration","page":"Guide","title":"Initializing the iteration","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"Because the non-interacting density of states is given rho_0(omega) = frac2pisqrt1 - omega^2, we can easily get the IR basis coefficients for the non-interacting propagator","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" G_0_ell = -S_ell rho_0_ell = -S_ell int ddomega V_ell(omega) rho_0(omega)","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"by utilizing the overlap function, which implements integration.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"julia> U = 1.2\n1.2\n\njulia> ρ₀(ω) = 2/π * √(1 - clamp(ω, -1, +1)^2)\nρ₀ (generic function with 1 method)\n\njulia> ρ₀l = overlap.(basis.v, ρ₀)\n20-element Vector{Float64}:\n 0.601244316541724\n 1.3444106938820255e-17\n -0.3114509472896204\n ⋮\n -4.553649124439119e-18\n -0.04700635138837371\n 1.734723475976807e-18\n\njulia> G₀l = -basis.s .* ρ₀l\n20-element Vector{Float64}:\n -0.8663768456323275\n -1.6339906341599403e-17\n 0.23834289781690587\n ⋮\n 7.045824663886568e-23\n 2.816974873845819e-7\n -3.924512839631511e-24","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"The coefficients of the full Green's function are then initialized with those of the non-interacting one. 
Also, we will need the non-interacting propagator in Matsubara for the Dyson equation, so we evaluate with the MatsubaraSampling object created before.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"julia> Gl = copy(G₀l)\n20-element Vector{Float64}:\n -0.8663768456323275\n -1.6339906341599403e-17\n ⋮\n 2.816974873845819e-7\n -3.924512839631511e-24\n\njulia> Σl = zero(Gl)\n20-element Vector{ComplexF64}:\n 0.0 + 0.0im\n 0.0 + 0.0im\n ⋮\n 0.0 + 0.0im\n 0.0 + 0.0im\n\njulia> Gl_prev = zero(Gl)\n20-element Vector{Float64}:\n 0.0\n 0.0\n ⋮\n 0.0\n 0.0\n\njulia> G₀iω = evaluate(siω, G₀l)\n10-element Vector{ComplexF64}:\n 1.0546844383198476e-16 - 1.468055523701327im\n 1.6747120525708993e-16 - 0.8633270688082162im\n ⋮\n 1.627612150170272e-17 - 0.06489281188294724im\n 6.134766817544449e-19 - 0.020802317001514643im","category":"page"},{"location":"guide/#Self-consistency-loop","page":"Guide","title":"Self-consistency loop","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"We are now ready to tackle the coupled equations from the start, and will restate them here for the reader's convenience:","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" Sigma(tau) = U^2 pqtyG(tau)^3","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"and the Dyson equation","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" hat G(mathrmiomega) = pqtypqtyhat G_0(mathrmiomega)^-1 - hatSigma(mathrmiomega)^-1","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"The first one is diagonal in tau and the second is diagonal in mathrmiomega, so we employ the IR basis to efficiently convert between the two bases. Starting with our approximation to G_ell we evaluate in the tau-basis to get G(tau), from which we can compute the self-energy on the sampling points Sigma(tau) according to the first equation. This can now be fitted to the tau-basis to get Sigma_ell, and from there hatSigma(mathrmiomega) via evaluation in the mathrmiomega-basis. Now the Dyson equation is used to get hat G(mathrmiomega) on the sampling frequencies, which is then fitted to the mathrmiomega-basis yielding G_ell and completing the loop. This is now performed until convergence.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"julia> while !isapprox(Gl, Gl_prev, rtol=ε)\n Gl_prev = copy(Gl)\n Gτ = evaluate(sτ, Gl)\n Στ = @. U^2 * Gτ^3\n Σl = fit(sτ, Στ)\n Σiω = evaluate(siω, Σl)\n Giω = @. 
(G₀iω^-1 - Σiω)^-1\n Gl = fit(siω, Giω)\n end","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"This is what one iteration looks like spelled out in equations:","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"beginaligned\n G^mathrmprev_ell = G_ell \n G(tau_i) = sum_ell U_ell(tau_i) G_ell \n Sigma(tau_i) = U^2 pqtyG(tau_i)^3 \n Sigma_ell = mathrmargmin_Sigma_ell sum_tau_i normSigma(tau_i) - sum_ell U_ell(tau_i) Sigma_ell^2 \n hatSigma(mathrmiomega_n) = sum_ell hat U_ell(mathrmiomega_n) Sigma_ell \n hat G(mathrmiomega_n) = pqtypqtyhat G_0(mathrmiomega_n)^-1 - hatSigma(mathrmiomega_n)^-1 \n G_ell = mathrmargmin_G_ell sum_mathrmiomega_n normhat G(mathrmiomega_n) - sum_ell hat U_ell(mathrmiomega_n) G_ell^2\nendaligned","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"We consider the iteration converged when the difference between subsequent iterations does not exceed the basis accuracy, i.e. when","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":" normG_ell - G^mathrmprev_ell leq varepsilon maxBqtynormG_ell normG^mathrmprev_ell","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"where the norm is normG_ell^2 = sum_ell=1^L G_ell^2.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"The entire script, as presented in Appendix: Optimized script, takes around 60ms to run on a laptop CPU from 2019 (Intel Core i7-9750H) and allocates roughly 19MB in the process.","category":"page"},{"location":"guide/#Visualizing-the-solution","page":"Guide","title":"Visualizing the solution","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"To plot our solution for the self-energy, we create a MatsubaraSampling object on a dense box of sampling frequencies. In this case, we only need it for expanding, i.e. multiplying a vector, hence there is no need for constructing the SVD, so we pass factorize=false.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"julia> box = FermionicFreq.(1:2:79)\n40-element Vector{FermionicFreq}:\n π/β\n 3π/β\n ⋮\n 77π/β\n 79π/β\n\njulia> siω_box = MatsubaraSampling(basis; sampling_points=box, factorize=false);\n\njulia> Σiω_box = evaluate(siω_box, Σl)\n40-element Vector{ComplexF64}:\n -6.067770915322836e-17 - 0.09325923974719101im\n 2.0279596075077236e-17 - 0.1225916020773678im\n ⋮\n -6.624594477591435e-17 - 0.014786512975659354im\n -7.08391512971528e-17 - 0.01441676347590391im","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"We are now in a position to visualize the results of our calculation in Fig 2.9:","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"In the main plot, the imaginary part of the self-energy in Matsubara alongside the sampling points on which it was computed. This illustrates very nicely one of the main advantages of our method: During the entire course of the iteration we only ever need to store and calculate the values of all functions on the sparse set of sampling points and are still able to expand the result on a dense frequency set in the end.\nIn the inset, the IR basis coefficients of the self-energy and of the propagator are shown, along with the basis singular values. We only plot the non-vanishing basis coefficients, which are those at odd values of ell because the real parts of hat G(mathrmiomega) and hat Sigma(mathrmiomega) are almost zero. 
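The data shown in the inset can be collected directly from quantities we already have; this is a hedged sketch (the plot itself would keep only the odd-ell entries and use a plotting package of choice):\n\njulia> σ = basis.s ./ first(basis.s);  # relative singular values\n\njulia> relG = abs.(Gl) ./ abs(first(Gl));\n\njulia> relΣ = abs.(Σl) ./ abs(first(Σl));\n\n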
The singular values S_ellS_1 are the bound for absG_l G_1 and absSigma_ell Sigma_1.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"(Image: Self-energy calculated in the self-consistency iteration. The inset shows the IR basis coefficients corresponding to the self-energy and the propagator.)","category":"page"},{"location":"guide/#Summary-and-outlook","page":"Guide","title":"Summary and outlook","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"We introduced SparseIR.jl, a full featured implementation of the intermediate representation in the Julia programming language. By means of a simple example, we explained in detail how to use it and the way it works internally. In this example, we solved an Anderson impurity model with elliptical density of states to second order perturbation theory in the interaction via a self-consistent loop. We successfully obtained the self-energy (accurate to second order) with minimal computational effort.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Regarding further work, perhaps the single most obvious direction is the extension to multi-particle quantities; And indeed, Refs. [7, 8] did exactly this, with Markus Wallerberger writing the as of yet unpublished Julia library OvercompleteIR.jl which builds upon SparseIR.jl. So, as a transitive dependency, the library of the present thesis has already found applications in solving the parquet equations for the Hubbard model and for the Anderson impurity model [9].","category":"page"},{"location":"guide/#References","page":"Guide","title":"References","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"H. Shinaoka, J. Otsuki, M. Ohzeki and K. Yoshimi. Compressing Green's function using intermediate representation between imaginary-time and real-frequency domains. Physical Review B 96, 35147 (2017).\n\n\n\nJ. Li, M. Wallerberger, N. Chikano, C.-N. Yeh, E. Gull and H. Shinaoka. Sparse sampling approach to efficient ab initio calculations at finite temperature. Physical Review B 101, 035144 (2020).\n\n\n\nH. Shinaoka, N. Chikano, E. Gull, J. Li, T. Nomoto, J. Otsuki, M. Wallerberger, T. Wang and K. Yoshimi. Efficient ab initio many-body calculations based on sparse modeling of Matsubara Green's function. SciPost Phys. Lect. Notes, 63 (2022).\n\n\n\nM. Wallerberger, S. Badr, S. Hoshino, S. Huber, F. Kakizawa, T. Koretsune, Y. Nagai, K. Nogaki, T. Nomoto, H. Mori, J. Otsuki, S. Ozaki, T. Plaikner, R. Sakurai, C. Vogel, N. Witt, K. Yoshimi and H. Shinaoka, sparse-ir: Optimal compression and sparse sampling of many-body propagators. SoftwareX 21, 101266 (2023-02).\n\n\n\nP. C. Hansen. Discrete Inverse Problems: Insights and Algorithms (SIAM, 2010).\n\n\n\nM. Helton and O. Smith. Bessels.jl (2022).\n\n\n\nH. Shinaoka, J. Otsuki, K. Haule, M. Wallerberger, E. Gull, K. Yoshimi and M. Ohzeki. Overcomplete compact representation of two-particle Green's functions. Physical Review B 97, 205111 (2018-05).\n\n\n\nM. Wallerberger, H. Shinaoka and A. Kauch. Solving the Bethe-Salpeter equation with exponential convergence. Physical Review Research 3, 033168 (2021-08).\n\n\n\nM. Michalek. 
Solving the Anderson impurity model with intermediate representation of the parquet equations (2024).\n\n\n\n","category":"page"},{"location":"guide/#optimized-script","page":"Guide","title":"Appendix: Optimized script","text":"","category":"section"},{"location":"guide/","page":"Guide","title":"Guide","text":"With minimal modifications, we can optimize our code for performance:","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"Put the script in a function. This is because globals are type-unstable in Julia.\nAdd a ::Vector{Float64} annotation to ensure type inferrability of ρ₀l.\nGl will be a Vector{ComplexF64} inside the loop, so make it complex right away for type stability.\nPreallocate and reuse arrays to remove allocations in the loop, minimizing total allocations and time spent garbage collecting. Here we benefit from SparseIR.jl providing in-place variants fit! and evaluate!.","category":"page"},{"location":"guide/","page":"Guide","title":"Guide","text":"using SparseIR\n\nfunction main(; β=10.0, ωmax=8.0, ε=1e-6)\n # Construct the IR basis and sparse sampling for fermionic propagators\n basis = FiniteTempBasis{Fermionic}(β, ωmax, ε)\n sτ = TauSampling(basis)\n siω = MatsubaraSampling(basis; positive_only=true)\n\n # Solve the single impurity Anderson model coupled to a bath with a\n # semicircular density of states with unit half bandwidth.\n U = 1.2\n ρ₀(ω) = 2 / π * √(1 - clamp(ω, -1, +1)^2)\n\n # Compute the IR basis coefficients for the non-interacting propagator\n ρ₀l = overlap.(basis.v, ρ₀)::Vector{Float64}\n G₀l = -basis.s .* ρ₀l\n\n # Self-consistency loop: alternate between second-order expression for the\n # self-energy and the Dyson equation until convergence.\n Gl = complex(G₀l)\n G₀iω = evaluate(siω, G₀l)\n\n # Preallocate arrays for the self-energy and the Green's function\n Σl = similar(Gl)\n Στ = similar(Gl, ComplexF64, length(sampling_points(sτ)))\n Σiω = similar(G₀iω)\n Gτ = similar(Στ)\n Giω = similar(G₀iω)\n\n Gl_prev = zero(Gl)\n while !isapprox(Gl, Gl_prev, rtol=ε)\n Gl_prev .= Gl\n evaluate!(Gτ, sτ, Gl)\n @. Στ = U^2 * Gτ^3\n fit!(Σl, sτ, Στ)\n evaluate!(Σiω, siω, Σl)\n @. Giω = (G₀iω^-1 - Σiω)^-1\n fit!(Gl, siω, Giω)\n end\n return basis, Σl\nend","category":"page"},{"location":"private/","page":"Private","title":"Private","text":"CurrentModule = SparseIR","category":"page"},{"location":"private/#Private-names-index","page":"Private","title":"Private names index","text":"","category":"section"},{"location":"private/","page":"Private","title":"Private","text":"These are not considered API and therefore not covered by any semver promises.","category":"page"},{"location":"private/","page":"Private","title":"Private","text":"Modules = [SparseIR]\nPrivate = true\nPublic = false","category":"page"},{"location":"private/#Core.Int-Tuple{MatsubaraFreq}","page":"Private","title":"Core.Int","text":"Get prefactor n for the Matsubara frequency ω = n*π/β\n\n\n\n\n\n","category":"method"},{"location":"private/#Core.Integer-Tuple{MatsubaraFreq}","page":"Private","title":"Core.Integer","text":"Get prefactor n for the Matsubara frequency ω = n*π/β\n\n\n\n\n\n","category":"method"},{"location":"private/#Core.Union-Union{Tuple{MatsubaraFreq{S}}, Tuple{S}} where S","page":"Private","title":"Core.Union","text":"(polyFT::PiecewiseLegendreFT)(ω)\n\nObtain Fourier transform of polynomial for given MatsubaraFreq ω.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.AbstractAugmentation","page":"Private","title":"SparseIR.AbstractAugmentation","text":"AbstractAugmentation\n\nScalar function in imaginary time/frequency.\n\nThis represents a single function in imaginary time and frequency, together with some auxiliary methods that make it suitable for augmenting a basis.\n\nSee also: AugmentedBasis\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.AbstractBasis","page":"Private","title":"SparseIR.AbstractBasis","text":"AbstractBasis\n\nAbstract base class for bases on the imaginary-time axis.\n\nLet basis be an abstract basis. Then we can expand a two-point propagator G(τ), where τ is imaginary time, into a set of basis functions:\n\nG(τ) == sum(basis.u[l](τ) * g[l] for l in 1:length(basis)) + ϵ(τ),\n\nwhere basis.u[l] is the l-th basis function, g[l] is the associated expansion coefficient and ϵ(τ) is an error term. Similarly, the Fourier transform Ĝ(n), where n is now a Matsubara frequency, can be expanded as follows:\n\nĜ(n) == sum(basis.uhat[l](n) * g[l] for l in 1:length(basis)) + ϵ(n),\n\nwhere basis.uhat[l] is now the Fourier transform of the basis function.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.AbstractKernel","page":"Private","title":"SparseIR.AbstractKernel","text":"(kernel::AbstractKernel)(x, y[, x₊, x₋])\n\nEvaluate kernel at point (x, y).\n\nThe parameters x₊ and x₋, if given, shall contain the values of x - xₘᵢₙ and xₘₐₓ - x, respectively. This is useful if either difference is to be formed and cancellation is expected.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.AbstractKernel-2","page":"Private","title":"SparseIR.AbstractKernel","text":"AbstractKernel\n\nIntegral kernel K(x, y).\n\nAbstract base type for an integral kernel, i.e. an AbstractFloat binary function K(x y) used in a Fredholm integral equation of the first kind:\n\n u(x) = K(x y) v(y) dy\n\nwhere x x_mathrmmin x_mathrmmax and y y_mathrmmin y_mathrmmax. 
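As a hedged aside that is not part of this docstring: a concrete kernel such as LogisticKernel can be evaluated through the call interface documented above, assuming SparseIR is loaded:\n\njulia> K = LogisticKernel(80.0);\n\njulia> K(0.5, 0.5)  # for LogisticKernel this equals exp(-Λ*y*(x + 1)/2) / (1 + exp(-Λ*y)) with Λ = 80\n\n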
For its SVE to exist, the kernel must be square-integrable, for its singular values to decay exponentially, it must be smooth.\n\nIn general, the kernel is applied to a scaled spectral function ρ(y) as:\n\n K(x y) ρ(y) dy\n\nwhere ρ(y) = w(y) ρ(y).\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.AbstractSVEHints","page":"Private","title":"SparseIR.AbstractSVEHints","text":"AbstractSVEHints\n\nDiscretization hints for singular value expansion of a given kernel.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.AbstractSampling","page":"Private","title":"SparseIR.AbstractSampling","text":"AbstractSampling\n\nAbstract type for sparse sampling.\n\nEncodes the \"basis transformation\" of a propagator from the truncated IR basis coefficients G_ir[l] to time/frequency sampled on sparse points G(x[i]) together with its inverse, a least squares fit:\n\n ________________ ___________________\n | | evaluate | |\n | Basis |---------------->| Value on |\n | coefficients |<----------------| sampling points |\n |________________| fit |___________________|\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.CentrosymmSVE","page":"Private","title":"SparseIR.CentrosymmSVE","text":"CentrosymmSVE <: AbstractSVE\n\nSVE of centrosymmetric kernel in block-diagonal (even/odd) basis.\n\nFor a centrosymmetric kernel K, i.e., a kernel satisfying: K(x, y) == K(-x, -y), one can make the following ansatz for the singular functions:\n\nu[l](x) = ured[l](x) + sign[l] * ured[l](-x)\nv[l](y) = vred[l](y) + sign[l] * ured[l](-y)\n\nwhere sign[l] is either +1 or -1. This means that the singular value expansion can be block-diagonalized into an even and an odd part by (anti-)symmetrizing the kernel:\n\nK_even = K(x, y) + K(x, -y)\nK_odd = K(x, y) - K(x, -y)\n\nThe lth basis function, restricted to the positive interval, is then the singular function of one of these kernels. If the kernel generates a Chebyshev system [1], then even and odd basis functions alternate.\n\n[1]: A. 
Karlin, Total Positivity (1968).\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.LogisticKernelOdd","page":"Private","title":"SparseIR.LogisticKernelOdd","text":"LogisticKernelOdd <: AbstractReducedKernel\n\nFermionic analytical continuation kernel, odd.\n\nIn dimensionless variables x = 2τβ - 1, y = βωΛ, the fermionic integral kernel is a function on -1 1 -1 1:\n\n K(x y) = -fracsinh(Λ x y 2)cosh(Λ y 2)\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.PiecewiseLegendreFT","page":"Private","title":"SparseIR.PiecewiseLegendreFT","text":"PiecewiseLegendreFT <: Function\n\nFourier transform of a piecewise Legendre polynomial.\n\nFor a given frequency index n, the Fourier transform of the Legendre function is defined as:\n\n p̂(n) == ∫ dx exp(im * π * n * x / (xmax - xmin)) p(x)\n\nThe polynomial is continued either periodically (freq=:even), in which case n must be even, or antiperiodically (freq=:odd), in which case n must be odd.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.PiecewiseLegendrePoly","page":"Private","title":"SparseIR.PiecewiseLegendrePoly","text":"PiecewiseLegendrePoly <: Function\n\nPiecewise Legendre polynomial.\n\nModels a function on the interval xmin xmax as a set of segments on the intervals Si = ai ai+1, where on each interval the function is expanded in scaled Legendre polynomials.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.PiecewiseLegendrePolyVector","page":"Private","title":"SparseIR.PiecewiseLegendrePolyVector","text":"PiecewiseLegendrePolyVector\n\nContains a Vector{PiecewiseLegendrePoly}.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.PowerModel","page":"Private","title":"SparseIR.PowerModel","text":"PowerModel\n\nModel from a high-frequency series expansion::\n\nA(iω) == sum(A[n] / (iω)^(n+1) for n in 1:N)\n\nwhere iω == i * π2 * wn is a reduced imaginary frequency, i.e., wn is an odd/even number for fermionic/bosonic frequencies.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.ReducedKernel","page":"Private","title":"SparseIR.ReducedKernel","text":"ReducedKernel\n\nRestriction of centrosymmetric kernel to positive interval.\n\nFor a kernel K on -1 1 -1 1 that is centrosymmetric, i.e. K(x y) = K(-x -y), it is straight-forward to show that the left/right singular vectors can be chosen as either odd or even functions.\n\nConsequentially, they are singular functions of a reduced kernel K_mathrmred on 0 1 0 1 that is given as either:\n\n K_mathrmred(x y) = K(x y) pm K(x -y)\n\nThis kernel is what this type represents. 
The full singular functions can be reconstructed by (anti-)symmetrically continuing them to the negative axis.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.RegularizedBoseKernelOdd","page":"Private","title":"SparseIR.RegularizedBoseKernelOdd","text":"RegularizedBoseKernelOdd <: AbstractReducedKernel\n\nBosonic analytical continuation kernel, odd.\n\nIn dimensionless variables x = 2 τ β - 1, y = β ω Λ, the fermionic integral kernel is a function on -1 1 -1 1:\n\n K(x y) = -y fracsinh(Λ x y 2)sinh(Λ y 2)\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.Rule","page":"Private","title":"SparseIR.Rule","text":"Rule{T<:AbstractFloat}\n\nQuadrature rule.\n\nApproximation of an integral over [a, b] by a sum over discrete points x with weights w:\n\n f(x) ω(x) dx _i f(x_i) w_i\n\nwhere we generally have superexponential convergence for smooth f(x) in the number of quadrature points.\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.SVEResult-Tuple{SparseIR.AbstractKernel}","page":"Private","title":"SparseIR.SVEResult","text":"SVEResult(kernel::AbstractKernel;\n Twork=nothing, ε=nothing, lmax=typemax(Int),\n n_gauss=nothing, svd_strat=:auto,\n sve_strat=iscentrosymmetric(kernel) ? CentrosymmSVE : SamplingSVE\n)\n\nPerform truncated singular value expansion of a kernel.\n\nPerform a truncated singular value expansion (SVE) of an integral kernel kernel : [xmin, xmax] x [ymin, ymax] -> ℝ:\n\nkernel(x, y) == sum(s[l] * u[l](x) * v[l](y) for l in (1, 2, 3, ...)),\n\nwhere s[l] are the singular values, which are ordered in non-increasing fashion, u[l](x) are the left singular functions, which form an orthonormal system on [xmin, xmax], and v[l](y) are the right singular functions, which form an orthonormal system on [ymin, ymax].\n\nThe SVE is mapped onto the singular value decomposition (SVD) of a matrix by expanding the kernel in piecewise Legendre polynomials (by default by using a collocation).\n\nArguments\n\nK::AbstractKernel: Integral kernel to take SVE from.\nε::Real: Accuracy target for the basis: attempt to have singular values down to a relative magnitude of ε, and have each singular value and singular vector be accurate to ε. A Twork with a machine epsilon of ε^2 or lower is required to satisfy this. Defaults to 2.2e-16 if xprec is available, and 1.5e-8 otherwise.\ncutoff::Real: Relative cutoff for the singular values. A Twork with machine epsilon of cutoff is required to satisfy this. Defaults to a small multiple of the machine epsilon.\nNote that cutoff and ε serve distinct purposes. cutoff reprsents the accuracy to which the kernel is reproduced, whereas ε is the accuracy to which the singular values and vectors are guaranteed.\nlmax::Integer: Maximum basis size. If given, only at most the lmax most significant singular values and associated singular functions are returned.\n`n_gauss (int): Order of Legendre polynomials. Defaults to kernel hinted value.\nTwork: Working data type. Defaults to a data type with machine epsilon of at mostε^2and at mostcutoff`, or otherwise most accurate data type available.\nsve_strat::AbstractSVE: SVE to SVD translation strategy. Defaults to SamplingSVE, optionally wrapped inside of a CentrosymmSVE if the kernel is centrosymmetric.\nsvd_strat ('fast' or 'default' or 'accurate'): SVD solver. 
Defaults to fast (ID/RRQR) based solution when accuracy goals are moderate, and more accurate Jacobi-based algorithm otherwise.\n\nReturns: An SVEResult containing the truncated singular value expansion.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.SamplingSVE","page":"Private","title":"SparseIR.SamplingSVE","text":"SamplingSVE <: AbstractSVE\n\nSVE to SVD translation by sampling technique [1].\n\nMaps the singular value expansion (SVE) of a kernel kernel onto the singular value decomposition of a matrix A. This is achieved by choosing two sets of Gauss quadrature rules: (x, wx) and (y, wy) and approximating the integrals in the SVE equations by finite sums. This implies that the singular values of the SVE are well-approximated by the singular values of the following matrix:\n\nA[i, j] = √(wx[i]) * K(x[i], y[j]) * √(wy[j])\n\nand the values of the singular functions at the Gauss sampling points can be reconstructed from the singular vectors u and v as follows:\n\nu[l,i] ≈ √(wx[i]) u[l](x[i])\nv[l,j] ≈ √(wy[j]) u[l](y[j])\n\n[1] P. Hansen, Discrete Inverse Problems, Ch. 3.1\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.Statistics","page":"Private","title":"SparseIR.Statistics","text":"Statistics(zeta)\n\nAbstract type for quantum statistics (fermionic/bosonic/etc.)\n\n\n\n\n\n","category":"type"},{"location":"private/#SparseIR.accuracy","page":"Private","title":"SparseIR.accuracy","text":"accuracy(basis::AbstractBasis)\n\nAccuracy of the basis.\n\nUpper bound to the relative error of reprensenting a propagator with the given number of basis functions (number between 0 and 1).\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.canonicalize!-Tuple{Any, Any}","page":"Private","title":"SparseIR.canonicalize!","text":"canonicalize!(u, v)\n\nCanonicalize basis.\n\nEach SVD (u[l], v[l]) pair is unique only up to a global phase, which may differ from implementation to implementation and also platform. We fix that gauge by demanding u[l](1) > 0. This ensures a diffeomorphic connection to the Legendre polynomials as Λ → 0.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.choose_accuracy-Tuple{Any, Any, Any}","page":"Private","title":"SparseIR.choose_accuracy","text":"choose_accuracy(ε, Twork[, svd_strat])\n\nChoose work type and accuracy based on specs and defaults\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.compute_unl_inner-Tuple{SparseIR.PiecewiseLegendrePoly, Any}","page":"Private","title":"SparseIR.compute_unl_inner","text":"compute_unl_inner(poly, wn)\n\nCompute piecewise Legendre to Matsubara transform.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.conv_radius","page":"Private","title":"SparseIR.conv_radius","text":"conv_radius(kernel)\n\nConvergence radius of the Matsubara basis asymptotic model.\n\nFor improved relative numerical accuracy, the IR basis functions on the Matsubara axis uhat(basis, n) can be evaluated from an asymptotic expression for abs(n) > conv_radius. If isinf(conv_radius), then the asymptotics are unused (the default).\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.default_matsubara_sampling_points","page":"Private","title":"SparseIR.default_matsubara_sampling_points","text":"default_matsubara_sampling_points(basis::AbstractBasis; positive_only=false)\n\nDefault sampling points on the imaginary frequency axis.\n\nArguments\n\npositive_only::Bool: Only return non-negative frequencies. 
This is useful if the object to be fitted is symmetric in Matsubura frequency, ĝ(ω) == conj(ĝ(-ω)), or, equivalently, real in imaginary time.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.default_tau_sampling_points","page":"Private","title":"SparseIR.default_tau_sampling_points","text":"default_tau_sampling_points(basis::AbstractBasis)\n\nDefault sampling points on the imaginary time/x axis.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.deriv-Union{Tuple{SparseIR.PiecewiseLegendrePoly}, Tuple{n}, Tuple{SparseIR.PiecewiseLegendrePoly, Val{n}}} where n","page":"Private","title":"SparseIR.deriv","text":"deriv(poly[, ::Val{n}=Val(1)])\n\nGet polynomial for the nth derivative.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.eval_matrix","page":"Private","title":"SparseIR.eval_matrix","text":"eval_matrix(T, basis, x)\n\nReturn evaluation matrix from coefficients to sampling points. T <: AbstractSampling.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.find_extrema-Tuple{SparseIR.PiecewiseLegendreFT}","page":"Private","title":"SparseIR.find_extrema","text":"find_extrema(polyFT::PiecewiseLegendreFT; part=nothing, grid=DEFAULT_GRID)\n\nObtain extrema of Fourier-transformed polynomial.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.finite_temp_bases","page":"Private","title":"SparseIR.finite_temp_bases","text":"finite_temp_bases(β::Real, ωmax::Real, ε=nothing;\n kernel=LogisticKernel(β * ωmax), sve_result=SVEResult(kernel; ε))\n\nConstruct FiniteTempBasis objects for fermion and bosons using the same LogisticKernel instance.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.from_IR","page":"Private","title":"SparseIR.from_IR","text":"from_IR(dlr::DiscreteLehmannRepresentation, gl::AbstractArray, dims=1)\n\nFrom IR to DLR. gl`: Expansion coefficients in IR.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.get_symmetrized-Tuple{SparseIR.AbstractKernel, Any}","page":"Private","title":"SparseIR.get_symmetrized","text":"get_symmetrized(kernel, sign)\n\nConstruct a symmetrized version of kernel, i.e. kernel(x, y) + sign * kernel(x, -y).\n\nwarning: Beware!\nBy default, this returns a simple wrapper over the current instance which naively performs the sum. You may want to override this to avoid cancellation.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.get_tnl-Tuple{Any, Any}","page":"Private","title":"SparseIR.get_tnl","text":"get_tnl(l, w)\n\nFourier integral of the l-th Legendre polynomial::\n\nTₗ(ω) == ∫ dx exp(iωx) Pₗ(x)\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.giw-Tuple{Any, Integer}","page":"Private","title":"SparseIR.giw","text":"giw(polyFT, wn)\n\nReturn model Green's function for reduced frequencies\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.iscentrosymmetric","page":"Private","title":"SparseIR.iscentrosymmetric","text":"iscentrosymmetric(kernel)\n\nReturn true if kernel(x, y) == kernel(-x, -y) for all values of x and y in range. This allows the kernel to be block-diagonalized, speeding up the singular value expansion by a factor of 4. 
Defaults to false.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.iswellconditioned-Tuple{SparseIR.AbstractBasis}","page":"Private","title":"SparseIR.iswellconditioned","text":"iswellconditioned(basis::AbstractBasis)\n\nReturns true if the sampling is expected to be well-conditioned.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.joinrules-Union{Tuple{AbstractArray{SparseIR.Rule{T}, 1}}, Tuple{T}} where T","page":"Private","title":"SparseIR.joinrules","text":"joinrules(rules)\n\nJoin multiple Gauss quadratures together.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.legder-Union{Tuple{AbstractMatrix{T}}, Tuple{T}, Tuple{AbstractMatrix{T}, Any}} where T","page":"Private","title":"SparseIR.legder","text":"legder\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.legendre-Union{Tuple{Any}, Tuple{T}, Tuple{Any, Type{T}}} where T","page":"Private","title":"SparseIR.legendre","text":"legendre(n[, T])\n\nGauss-Legendre quadrature with n points on [-1, 1].\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.legendre_collocation","page":"Private","title":"SparseIR.legendre_collocation","text":"legendre_collocation(rule, n=length(rule.x))\n\nGenerate collocation matrix from Gauss-Legendre rule.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.legvander-Union{Tuple{T}, Tuple{AbstractVector{T}, Integer}} where T","page":"Private","title":"SparseIR.legvander","text":"legvander(x, deg)\n\nPseudo-Vandermonde matrix of degree deg.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.matop!-Union{Tuple{N}, Tuple{T}, Tuple{S}, Tuple{AbstractArray{S, N}, Any, AbstractArray{T, N}, Any, Any}} where {S, T, N}","page":"Private","title":"SparseIR.matop!","text":"matop!(buffer, mat, arr::AbstractArray, op, dim)\n\nApply the operator op to the matrix mat and to the array arr along the first dimension (dim=1) or the last dimension (dim=N).\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.matop_along_dim!-Union{Tuple{N}, Tuple{T}, Tuple{Any, Any, AbstractArray{T, N}, Any, Any}} where {T, N}","page":"Private","title":"SparseIR.matop_along_dim!","text":"matop_along_dim!(buffer, mat, arr::AbstractArray, dim::Integer, op)\n\nApply the operator op to the matrix mat and to the array arr along the dimension dim, writing the result to buffer.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.matrices-Tuple{SparseIR.SamplingSVE}","page":"Private","title":"SparseIR.matrices","text":"matrices(sve::AbstractSVE)\n\nSVD problems underlying the SVE.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.matrix_from_gauss-Union{Tuple{T}, Tuple{Any, SparseIR.Rule{T}, SparseIR.Rule{T}}} where T","page":"Private","title":"SparseIR.matrix_from_gauss","text":"matrix_from_gauss(kernel, gauss_x, gauss_y)\n\nCompute matrix for kernel from Gauss rules.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.movedim-Union{Tuple{N}, Tuple{T}, Tuple{AbstractArray{T, N}, Pair}} where {T, N}","page":"Private","title":"SparseIR.movedim","text":"movedim(arr::AbstractArray, src => dst)\n\nMove arr's dimension at src to dst while keeping the order of the remaining dimensions unchanged.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.ngauss","page":"Private","title":"SparseIR.ngauss","text":"ngauss(hints)\n\nGauss-Legendre order to use to guarantee 
accuracy.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.nsvals-Tuple{SparseIR.SVEHintsLogistic}","page":"Private","title":"SparseIR.nsvals","text":"nsvals(hints)\n\nUpper bound for number of singular values.\n\nUpper bound on the number of singular values above the given threshold, i.e. where s[l] ≥ ε * first(s).\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.phase_stable-Tuple{Any, Integer}","page":"Private","title":"SparseIR.phase_stable","text":"phase_stable(poly, wn)\n\nPhase factor for the piecewise Legendre to Matsubara transform.\n\nCompute the following phase factor in a stable way:\n\nexp.(iπ/2 * wn * cumsum(Δx(poly)))\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.piecewise-Tuple{Any, Vector}","page":"Private","title":"SparseIR.piecewise","text":"piecewise(rule, edges)\n\nPiecewise quadrature with the same quadrature rule, but scaled.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.postprocess-Tuple{SparseIR.SamplingSVE, Any, Any, Any}","page":"Private","title":"SparseIR.postprocess","text":"postprocess(sve::AbstractSVE, u, s, v)\n\nConstruct the SVE result from the SVD.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.rescale-Tuple{FiniteTempBasis, Any}","page":"Private","title":"SparseIR.rescale","text":"rescale(basis::FiniteTempBasis, new_β)\n\nReturn a basis for different temperature.\n\nUses the same kernel with the same ε, but a different temperature. Note that this implies a different UV cutoff ωmax, since Λ == β * ωmax stays constant.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.reseat-Tuple{SparseIR.Rule, Any, Any}","page":"Private","title":"SparseIR.reseat","text":"reseat(rule, a, b)\n\nReseat quadrature rule to new domain.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.roots-Tuple{SparseIR.PiecewiseLegendrePoly}","page":"Private","title":"SparseIR.roots","text":"roots(poly)\n\nFind all roots of the piecewise polynomial poly.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.scale-Tuple{Any, Any}","page":"Private","title":"SparseIR.scale","text":"scale(rule, factor)\n\nScale weights by factor.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.segments_x-Union{Tuple{SparseIR.SVEHintsLogistic}, Tuple{T}, Tuple{SparseIR.SVEHintsLogistic, Type{T}}} where T","page":"Private","title":"SparseIR.segments_x","text":"segments_x(sve_hints::AbstractSVEHints[, T])\n\nSegments for piecewise polynomials on the x axis.\n\nList of segments on the x axis for the associated piecewise polynomial. Should reflect the approximate position of roots of a high-order singular function in x.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.segments_y-Union{Tuple{SparseIR.SVEHintsLogistic}, Tuple{T}, Tuple{SparseIR.SVEHintsLogistic, Type{T}}} where T","page":"Private","title":"SparseIR.segments_y","text":"segments_y(sve_hints::AbstractSVEHints[, T])\n\nSegments for piecewise polynomials on the y axis.\n\nList of segments on the y axis for the associated piecewise polynomial. 
Should reflect the approximate position of roots of a high-order singular function in y.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.shift_xmid-Tuple{Any, Any}","page":"Private","title":"SparseIR.shift_xmid","text":"shift_xmid(knots, Δx)\n\nReturn midpoint relative to the nearest integer plus a shift.\n\nReturn the midpoints xmid of the segments, as pair (diff, shift), where shift is in (0, 1, -1) and diff is a float such that xmid == shift + diff to floating point accuracy.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.significance","page":"Private","title":"SparseIR.significance","text":"significance(basis::AbstractBasis)\n\nReturn vector σ, where 0 ≤ σ[i] ≤ 1 is the significance level of the i-th basis function. If ϵ is the desired accuracy to which to represent a propagator, then any basis function where σ[i] < ϵ can be neglected.\n\nFor the IR basis, we simply have that σ[i] = s[i] / first(s).\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.split-Tuple{Any, Real}","page":"Private","title":"SparseIR.split","text":"split(poly, x)\n\nSplit segment.\n\nFind segment of poly's domain that covers x.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.statistics-Union{Tuple{SparseIR.AbstractBasis{S}}, Tuple{S}} where S<:SparseIR.Statistics","page":"Private","title":"SparseIR.statistics","text":"statistics(basis::AbstractBasis)\n\nQuantum statistic (Statistics instance, Fermionic() or Bosonic()).\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.sve_hints","page":"Private","title":"SparseIR.sve_hints","text":"sve_hints(kernel, ε)\n\nProvide discretisation hints for the SVE routines.\n\nAdvises the SVE routines of discretisation parameters suitable in tranforming the (infinite) SVE into an (finite) SVD problem.\n\nSee also AbstractSVEHints.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.to_IR","page":"Private","title":"SparseIR.to_IR","text":"to_IR(dlr::DiscreteLehmannRepresentation, g_dlr::AbstractArray, dims=1)\n\nFrom DLR to IR. 
g_dlr`: Expansion coefficients in DLR.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.truncate-Tuple{Any, Any, Any}","page":"Private","title":"SparseIR.truncate","text":"truncate(u, s, v; rtol=0.0, lmax=typemax(Int))\n\nTruncate singular value expansion.\n\nArguments\n\n- `u`, `s`, `v`: Thin singular value expansion\n- `rtol`: Only singular values satisfying `s[l]/s[1] > rtol` are retained.\n- `lmax`: At most the `lmax` most significant singular values are retained.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.value-Tuple{MatsubaraFreq, Real}","page":"Private","title":"SparseIR.value","text":"Get value of the Matsubara frequency ω = n*π/β\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.valueim-Tuple{MatsubaraFreq, Real}","page":"Private","title":"SparseIR.valueim","text":"Get complex value of the Matsubara frequency iω = iπ/β * n\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.weight_func","page":"Private","title":"SparseIR.weight_func","text":"weight_func(kernel, statistics::Statistics)\n\nReturn the weight function for the given statistics.\n\nFermion: w(x) == 1\nBoson: w(y) == 1/tanh(Λ*y/2)\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.workarrlength-Tuple{SparseIR.AbstractSampling, AbstractArray}","page":"Private","title":"SparseIR.workarrlength","text":"workarrlength(smpl::AbstractSampling, al; dim=1)\n\nReturn length of workarr for fit!.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.xrange","page":"Private","title":"SparseIR.xrange","text":"xrange(kernel)\n\nReturn a tuple (x_mathrmmin x_mathrmmax) delimiting the range of allowed x values.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.ypower","page":"Private","title":"SparseIR.ypower","text":"ypower(kernel)\n\nPower with which the y coordinate scales.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.yrange","page":"Private","title":"SparseIR.yrange","text":"yrange(kernel)\n\nReturn a tuple (y_mathrmmin y_mathrmmax) delimiting the range of allowed y values.\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.zeta-Tuple{MatsubaraFreq}","page":"Private","title":"SparseIR.zeta","text":"Get statistics ζ for Matsubara frequency ω = (2*m+ζ)*π/β\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.Λ","page":"Private","title":"SparseIR.Λ","text":"Λ(basis::AbstractBasis)\nlambda(basis::AbstractBasis)\n\nBasis cutoff parameter, Λ = β * ωmax, or None if not present\n\n\n\n\n\n","category":"function"},{"location":"private/#SparseIR.β-Tuple{SparseIR.AbstractBasis}","page":"Private","title":"SparseIR.β","text":"β(basis::AbstractBasis)\nbeta(basis::AbstractBasis)\n\nInverse temperature or nothing if unscaled basis.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR.ωmax","page":"Private","title":"SparseIR.ωmax","text":"ωmax(basis::AbstractBasis)\nwmax(basis::AbstractBasis)\n\nReal frequency cutoff or nothing if unscaled basis.\n\n\n\n\n\n","category":"function"},{"location":"private/","page":"Private","title":"Private","text":"Modules = [SparseIR._LinAlg]\nPrivate = true\nPublic = true","category":"page"},{"location":"private/#SparseIR._LinAlg.givens_lmul-Union{Tuple{T}, Tuple{Tuple{T, T}, Any}} where T","page":"Private","title":"SparseIR._LinAlg.givens_lmul","text":"Apply Givens rotation to vector:\n\n [ a ] = [ c s ] [ x ]\n [ b ] [ -s c ] [ y ]\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.givens_params-Union{Tuple{T}, Tuple{T, 
T}} where T<:AbstractFloat","page":"Private","title":"SparseIR._LinAlg.givens_params","text":"Compute Givens rotation R matrix that satisfies:\n\n[ c s ] [ f ] [ r ]\n[ -s c ] [ g ] = [ 0 ]\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.rrqr!-Union{Tuple{AbstractMatrix{T}}, Tuple{T}} where T<:AbstractFloat","page":"Private","title":"SparseIR._LinAlg.rrqr!","text":"Truncated rank-revealing QR decomposition with full column pivoting.\n\nDecomposes a (m, n) matrix A into the product:\n\nA[:,piv] == Q * R\n\nwhere Q is an (m, k) isometric matrix, R is a (k, n) upper triangular matrix, piv is a permutation vector, and k is chosen such that the relative tolerance tol is met in the equality above.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.rrqr-Union{Tuple{AbstractMatrix{T}}, Tuple{T}} where T<:AbstractFloat","page":"Private","title":"SparseIR._LinAlg.rrqr","text":"Truncated rank-revealing QR decomposition with full column pivoting.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.svd2x2-Union{Tuple{T}, NTuple{4, T}} where T","page":"Private","title":"SparseIR._LinAlg.svd2x2","text":"Perform the SVD of an arbitrary two-by-two matrix:\n\n [ a11 a12 ] = [ cu -su ] [ smax 0 ] [ cv sv ]\n [ a21 a22 ] [ su cu ] [ 0 smin ] [ -sv cv ]\n\nNote that smax and smin can be negative.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.svd2x2-Union{Tuple{T}, Tuple{T, T, T}} where T<:AbstractFloat","page":"Private","title":"SparseIR._LinAlg.svd2x2","text":"Perform the SVD of upper triangular two-by-two matrix:\n\n [ f g ] = [ cu -su ] [ smax 0 ] [ cv sv ]\n [ 0 h ] [ su cu ] [ 0 smin ] [ -sv cv ]\n\nNote that smax and smin can be negative.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.svd_jacobi!-Union{Tuple{AbstractMatrix{T}}, Tuple{T}} where T","page":"Private","title":"SparseIR._LinAlg.svd_jacobi!","text":"Singular value decomposition using Jacobi rotations.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.svd_jacobi-Union{Tuple{AbstractMatrix{T}}, Tuple{T}} where T","page":"Private","title":"SparseIR._LinAlg.svd_jacobi","text":"Singular value decomposition using Jacobi rotations.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.truncate_qr_result-Union{Tuple{T}, Tuple{LinearAlgebra.QRPivoted{T, S, C} where {S<:AbstractMatrix{T}, C<:AbstractVector{T}}, Integer}} where T","page":"Private","title":"SparseIR._LinAlg.truncate_qr_result","text":"Truncate RRQR result low-rank\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.tsvd!-Union{Tuple{AbstractMatrix{T}}, Tuple{T}} where T<:AbstractFloat","page":"Private","title":"SparseIR._LinAlg.tsvd!","text":"Truncated singular value decomposition.\n\nDecomposes an (m, n) matrix A into the product:\n\nA == U * (s .* VT)\n\nwhere U is a (m, k) matrix with orthogonal columns, VT is a (k, n) matrix with orthogonal rows and s are the singular values, a set of k nonnegative numbers in non-ascending order. 
The SVD is truncated in the sense that singular values below tol are discarded.\n\n\n\n\n\n","category":"method"},{"location":"private/#SparseIR._LinAlg.tsvd-Union{Tuple{AbstractMatrix{T}}, Tuple{T}} where T<:AbstractFloat","page":"Private","title":"SparseIR._LinAlg.tsvd","text":"Truncated singular value decomposition.\n\n\n\n\n\n","category":"method"},{"location":"public/","page":"Public","title":"Public","text":"CurrentModule = SparseIR","category":"page"},{"location":"public/#Public-names-index","page":"Public","title":"Public names index","text":"","category":"section"},{"location":"public/","page":"Public","title":"Public","text":"Modules = [SparseIR]\nPrivate = false\nPublic = true","category":"page"},{"location":"public/#SparseIR.SparseIR","page":"Public","title":"SparseIR.SparseIR","text":"Intermediate representation (IR) for many-body propagators.\n\n\n\n\n\n","category":"module"},{"location":"public/#SparseIR.AugmentedBasis","page":"Public","title":"SparseIR.AugmentedBasis","text":"AugmentedBasis <: AbstractBasis\n\nAugmented basis on the imaginary-time/frequency axis.\n\nGroups a set of additional functions, augmentations, with a given basis. The augmented functions then form the first basis functions, while the rest is provided by the regular basis, i.e.:\n\nu[l](x) == l < naug ? augmentations[l](x) : basis.u[l-naug](x),\n\nwhere naug = length(augmentations) is the number of added basis functions through augmentation. Similar expressions hold for Matsubara frequencies.\n\nAugmentation is useful in constructing bases for vertex-like quantities such as self-energies [wallerberger2021] and when constructing a two-point kernel that serves as a base for multi-point functions [shinaoka2018].\n\nwarning: Warning\nBases augmented with TauConst and TauLinear tend to be poorly conditioned. Care must be taken while fitting and compactness should be enforced if possible to regularize the problem.While vertex bases, i.e. bases augmented with MatsubaraConst, stay reasonably well-conditioned, it is still good practice to treat the Hartree–Fock term separately rather than including it in the basis, if possible.\n\nSee also: MatsubaraConst for vertex basis [wallerberger2021], TauConst, TauLinear for multi-point [shinaoka2018]\n\n[wallerberger2021]: https://doi.org/10.1103/PhysRevResearch.3.033168\n\n[shinaoka2018]: https://doi.org/10.1103/PhysRevB.97.205111\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.Bosonic","page":"Public","title":"SparseIR.Bosonic","text":"Bosonic statistics.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.DiscreteLehmannRepresentation","page":"Public","title":"SparseIR.DiscreteLehmannRepresentation","text":"DiscreteLehmannRepresentation <: AbstractBasis\n\nDiscrete Lehmann representation (DLR) with poles selected according to extrema of IR.\n\nThis class implements a variant of the discrete Lehmann representation (DLR) 1. Instead of a truncated singular value expansion of the analytic continuation kernel K like the IR, the discrete Lehmann representation is based on a \"sketching\" of K. The resulting basis is a linear combination of discrete set of poles on the real-frequency axis, continued to the imaginary-frequency axis:\n\n G(iv) == sum(a[i] / (iv - w[i]) for i in range(L))\n\nWarning The poles on the real-frequency axis selected for the DLR are based on a rank-revealing decomposition, which offers accuracy guarantees. 
Here, we instead select the pole locations based on the zeros of the IR basis functions on the real axis, which is a heuristic. We do not expect that difference to matter, but please don't blame the DLR authors if we were wrong :-)\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.Fermionic","page":"Public","title":"SparseIR.Fermionic","text":"Fermionic statistics.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.FiniteTempBasis","page":"Public","title":"SparseIR.FiniteTempBasis","text":"FiniteTempBasis <: AbstractBasis\n\nIntermediate representation (IR) basis for given temperature.\n\nFor a continuation kernel K from real frequencies, ω ∈ [-ωmax, ωmax], to imaginary time, τ ∈ [0, β], this type stores the truncated singular value expansion or IR basis:\n\nK(τ, ω) ≈ sum(u[l](τ) * s[l] * v[l](ω) for l in 1:L)\n\nThis basis is inferred from a reduced form by appropriate scaling of the variables.\n\nFields\n\nu::PiecewiseLegendrePolyVector: Set of IR basis functions on the imaginary time (tau) axis. These functions are stored as piecewise Legendre polynomials.\nTo obtain the value of all basis functions at a point or a array of points x, you can call the function u(x). To obtain a single basis function, a slice or a subset l, you can use u[l].\nuhat::PiecewiseLegendreFT: Set of IR basis functions on the Matsubara frequency (wn) axis. These objects are stored as a set of Bessel functions.\nTo obtain the value of all basis functions at a Matsubara frequency or a array of points wn, you can call the function uhat(wn). Note that we expect reduced frequencies, which are simply even/odd numbers for bosonic/fermionic objects. To obtain a single basis function, a slice or a subset l, you can use uhat[l].\ns: Vector of singular values of the continuation kernel\nv::PiecewiseLegendrePoly: Set of IR basis functions on the real frequency (w) axis. These functions are stored as piecewise Legendre polynomials.\nTo obtain the value of all basis functions at a point or a array of points w, you can call the function v(w). 
To obtain a single basis function, a slice or a subset l, you can use v[l].\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.FiniteTempBasis-Union{Tuple{S}, Tuple{Real, Real}, Tuple{Real, Real, Any}} where S","page":"Public","title":"SparseIR.FiniteTempBasis","text":"FiniteTempBasis{S}(β, ωmax, ε=nothing; max_size=nothing, args...)\n\nConstruct a finite temperature basis suitable for the given S (Fermionic or Bosonic) and cutoffs β and ωmax.\n\n\n\n\n\n","category":"method"},{"location":"public/#SparseIR.FiniteTempBasisSet","page":"Public","title":"SparseIR.FiniteTempBasisSet","text":"FiniteTempBasisSet\n\nType for holding IR bases and sparse-sampling objects.\n\nAn object of this type holds IR bases for fermions and bosons and associated sparse-sampling objects.\n\nFields\n\nbasis_f::FiniteTempBasis: Fermion basis\nbasis_b::FiniteTempBasis: Boson basis\ntau::Vector{Float64}: Sampling points in the imaginary-time domain\nwn_f::Vector{Int}: Sampling fermionic frequencies\nwn_b::Vector{Int}: Sampling bosonic frequencies\nsmpltauf::TauSampling: Sparse sampling for tau & fermion\nsmpltaub::TauSampling: Sparse sampling for tau & boson\nsmplwnf::MatsubaraSampling: Sparse sampling for Matsubara frequency & fermion\nsmplwnb::MatsubaraSampling: Sparse sampling for Matsubara frequency & boson\nsve_result::Tuple{PiecewiseLegendrePoly,Vector{Float64},PiecewiseLegendrePoly}: Results of SVE\n\nGetters\n\nbeta::Float64: Inverse temperature\nωmax::Float64: Cut-off frequency\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.LogisticKernel","page":"Public","title":"SparseIR.LogisticKernel","text":"LogisticKernel <: AbstractKernel\n\nFermionic/bosonic analytical continuation kernel.\n\nIn dimensionless variables x = 2 τβ - 1, y = β ωΛ, the integral kernel is a function on -1 1 -1 1:\n\n K(x y) = frace^-Λ y (x + 1) 21 + e^-Λ y\n\nLogisticKernel is a fermionic analytic continuation kernel. Nevertheless, one can model the τ dependence of a bosonic correlation function as follows:\n\n frace^-Λ y (x + 1) 21 - e^-Λ y ρ(y) dy = K(x y) ρ(y) dy\n\nwith\n\n ρ(y) = w(y) ρ(y)\n\nwhere the weight function is given by\n\n w(y) = frac1tanh(Λ y2)\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.MatsubaraConst","page":"Public","title":"SparseIR.MatsubaraConst","text":"MatsubaraConst <: AbstractAugmentation\n\nConstant in Matsubara, undefined in imaginary time.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.MatsubaraFreq","page":"Public","title":"SparseIR.MatsubaraFreq","text":"MatsubaraFreq(n)\n\nPrefactor n of the Matsubara frequency ω = n*π/β\n\nStruct representing the Matsubara frequency ω entering the Fourier transform of a propagator G(τ) on imaginary time τ to its Matsubara equivalent Ĝ(iω) on the imaginary-frequency axis:\n\n β\nĜ(iω) = ∫ dτ exp(iωτ) G(τ) with ω = n π/β,\n 0\n\nwhere β is inverse temperature and by convention we include the imaginary unit in the frequency argument, i.e, Ĝ(iω). The frequencies depend on the statistics of the propagator, i.e., we have that:\n\nG(τ - β) = ± G(τ)\n\nwhere + is for bosons and - is for fermions. 
The frequencies are restricted accordingly.\n\nBosonic frequency (S == Bosonic): n even (periodic in β)\nFermionic frequency (S == Fermionic): n odd (anti-periodic in β)\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.MatsubaraSampling","page":"Public","title":"SparseIR.MatsubaraSampling","text":"MatsubaraSampling <: AbstractSampling\n\nSparse sampling in Matsubara frequencies.\n\nAllows the transformation between the IR basis and a set of sampling points in (scaled/unscaled) imaginary frequencies.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.MatsubaraSampling-Tuple{SparseIR.AbstractBasis}","page":"Public","title":"SparseIR.MatsubaraSampling","text":"MatsubaraSampling(basis; positive_only=false,\n sampling_points=default_matsubara_sampling_points(basis; positive_only),\n factorize=true)\n\nConstruct a MatsubaraSampling object. If not given, the sampling_points are chosen as the (discrete) extrema of the highest-order basis function in Matsubara. This turns out to be close to optimal with respect to conditioning for this size (within a few percent).\n\nBy setting positive_only=true, one assumes that functions to be fitted are symmetric in Matsubara frequency, i.e.:\n\n G(iν) = conj(G(-iν))\n\nor equivalently, that they are purely real in imaginary time. In this case, sparse sampling is performed over non-negative frequencies only, cutting away half of the necessary sampling space. factorize controls whether the SVD decomposition is computed.\n\n\n\n\n\n","category":"method"},{"location":"public/#SparseIR.RegularizedBoseKernel","page":"Public","title":"SparseIR.RegularizedBoseKernel","text":"RegularizedBoseKernel <: AbstractKernel\n\nRegularized bosonic analytical continuation kernel.\n\nIn dimensionless variables x = 2 τβ - 1, y = β ωΛ, the bosonic integral kernel is a function on -1 1 -1 1:\n\n K(x y) = y frace^-Λ y (x + 1) 2e^-Λ y - 1\n\nCare has to be taken in evaluating this expression around y = 0.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.TauConst","page":"Public","title":"SparseIR.TauConst","text":"TauConst <: AbstractAugmentation\n\nConstant in imaginary time/discrete delta in frequency.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.TauLinear","page":"Public","title":"SparseIR.TauLinear","text":"TauLinear <: AbstractAugmentation\n\nLinear function in imaginary time, antisymmetric around β/2.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.TauSampling","page":"Public","title":"SparseIR.TauSampling","text":"TauSampling <: AbstractSampling\n\nSparse sampling in imaginary time.\n\nAllows the transformation between the IR basis and a set of sampling points in (scaled/unscaled) imaginary time.\n\n\n\n\n\n","category":"type"},{"location":"public/#SparseIR.TauSampling-Tuple{SparseIR.AbstractBasis}","page":"Public","title":"SparseIR.TauSampling","text":"TauSampling(basis; sampling_points=default_tau_sampling_points(basis), factorize=true)\n\nConstruct a TauSampling object. If not given, the sampling_points are chosen as the extrema of the highest-order basis function in imaginary time. This turns out to be close to optimal with respect to conditioning for this size (within a few percent). 
factorize controls whether the SVD decomposition is computed.\n\n\n\n\n\n","category":"method"},{"location":"public/#SparseIR.evaluate!-Union{Tuple{N}, Tuple{T}, Tuple{S}, Tuple{AbstractArray{T, N}, SparseIR.AbstractSampling, AbstractArray{S, N}}} where {S, T, N}","page":"Public","title":"SparseIR.evaluate!","text":"evaluate!(buffer::AbstractArray{T,N}, sampling, al; dim=1) where {T,N}\n\nLike evaluate, but write the result to buffer. Please use dim = 1 or N to avoid allocating large temporary arrays internally.\n\n\n\n\n\n","category":"method"},{"location":"public/#SparseIR.evaluate-Union{Tuple{N}, Tuple{T}, Tuple{Tmat}, Tuple{S}, Tuple{SparseIR.AbstractSampling{S, Tmat}, AbstractArray{T, N}}} where {S, Tmat, T, N}","page":"Public","title":"SparseIR.evaluate","text":"evaluate(sampling, al; dim=1)\n\nEvaluate the basis coefficients al at the sparse sampling points.\n\n\n\n\n\n","category":"method"},{"location":"public/#SparseIR.fit!-Union{Tuple{N}, Tuple{T}, Tuple{S}, Tuple{Array{S, N}, SparseIR.AbstractSampling, Array{T, N}}} where {S, T, N}","page":"Public","title":"SparseIR.fit!","text":"fit!(buffer::Array{S,N}, smpl::AbstractSampling, al::Array{T,N}; \n dim=1, workarr::Vector{S}) where {S,T,N}\n\nLike fit, but write the result to buffer. Use dim = 1 or dim = N to avoid allocating large temporary arrays internally. The length of workarr cannot be smaller than SparseIR.workarrlength(smpl, al).\n\n\n\n\n\n","category":"method"},{"location":"public/#SparseIR.fit-Union{Tuple{N}, Tuple{T}, Tuple{Tmat}, Tuple{S}, Tuple{SparseIR.AbstractSampling{S, Tmat}, AbstractArray{T, N}}} where {S, Tmat, T, N}","page":"Public","title":"SparseIR.fit","text":"fit(sampling, al::AbstractArray{T,N}; dim=1)\n\nFit basis coefficients from the sparse sampling points Please use dim = 1 or N to avoid allocating large temporary arrays internally.\n\n\n\n\n\n","category":"method"},{"location":"public/#SparseIR.overlap-Union{Tuple{F}, Tuple{SparseIR.PiecewiseLegendrePoly, F}} where F","page":"Public","title":"SparseIR.overlap","text":"overlap(poly::PiecewiseLegendrePoly, f; \n rtol=eps(T), return_error=false, maxevals=10^4, points=T[])\n\nEvaluate overlap integral of poly with arbitrary function f.\n\nGiven the function f, evaluate the integral\n\n∫ dx f(x) poly(x)\n\nusing adaptive Gauss-Legendre quadrature.\n\npoints is a sequence of break points in the integration interval where local difficulties of the integrand may occur (e.g. singularities, discontinuities).\n\n\n\n\n\n","category":"method"},{"location":"","page":"Home","title":"Home","text":"CurrentModule = SparseIR","category":"page"},{"location":"#SparseIR.jl","page":"Home","title":"SparseIR.jl","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"Documentation for SparseIR.jl.","category":"page"},{"location":"","page":"Home","title":"Home","text":"There is a guide available which details SparseIR.jl's inner workings by means of a worked example.","category":"page"},{"location":"","page":"Home","title":"Home","text":"For listings of all documented names, see Public names index and the Private names index.","category":"page"}] }