diff --git a/dev/.documenter-siteinfo.json b/dev/.documenter-siteinfo.json index 2796fcc..7fc7232 100644 --- a/dev/.documenter-siteinfo.json +++ b/dev/.documenter-siteinfo.json @@ -1 +1 @@ -{"documenter":{"julia_version":"1.10.0","generation_timestamp":"2024-02-14T11:35:09","documenter_version":"1.2.1"}} \ No newline at end of file +{"documenter":{"julia_version":"1.10.2","generation_timestamp":"2024-03-07T18:54:08","documenter_version":"1.3.0"}} \ No newline at end of file diff --git a/dev/assets/documenter.js b/dev/assets/documenter.js index a42c6bb..a9c88fb 100644 --- a/dev/assets/documenter.js +++ b/dev/assets/documenter.js @@ -4,7 +4,6 @@ requirejs.config({ 'highlight-julia': 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.8.0/languages/julia.min', 'headroom': 'https://cdnjs.cloudflare.com/ajax/libs/headroom/0.12.0/headroom.min', 'jqueryui': 'https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.13.2/jquery-ui.min', - 'minisearch': 'https://cdn.jsdelivr.net/npm/minisearch@6.1.0/dist/umd/index.min', 'jquery': 'https://cdnjs.cloudflare.com/ajax/libs/jquery/3.7.0/jquery.min', 'headroom-jquery': 'https://cdnjs.cloudflare.com/ajax/libs/headroom/0.12.0/jQuery.headroom.min', 'highlight': 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.8.0/highlight.min', @@ -109,9 +108,10 @@ $(document).on("click", ".docstring header", function () { }); }); -$(document).on("click", ".docs-article-toggle-button", function () { +$(document).on("click", ".docs-article-toggle-button", function (event) { let articleToggleTitle = "Expand docstring"; let navArticleToggleTitle = "Expand all docstrings"; + let animationSpeed = event.noToggleAnimation ? 0 : 400; debounce(() => { if (isExpanded) { @@ -122,7 +122,7 @@ $(document).on("click", ".docs-article-toggle-button", function () { isExpanded = false; - $(".docstring section").slideUp(); + $(".docstring section").slideUp(animationSpeed); } else { $(this).removeClass("fa-chevron-down").addClass("fa-chevron-up"); $(".docstring-article-toggle-button") @@ -133,7 +133,7 @@ $(document).on("click", ".docs-article-toggle-button", function () { articleToggleTitle = "Collapse docstring"; navArticleToggleTitle = "Collapse all docstrings"; - $(".docstring section").slideDown(); + $(".docstring section").slideDown(animationSpeed); } $(this).prop("title", navArticleToggleTitle); @@ -230,224 +230,465 @@ $(document).ready(function () { }) //////////////////////////////////////////////////////////////////////////////// -require(['jquery', 'minisearch'], function($, minisearch) { - -// In general, most search related things will have "search" as a prefix. 
-// To get an in-depth about the thought process you can refer: https://hetarth02.hashnode.dev/series/gsoc +require(['jquery'], function($) { -let results = []; -let timer = undefined; +$(document).ready(function () { + let meta = $("div[data-docstringscollapsed]").data(); -let data = documenterSearchIndex["docs"].map((x, key) => { - x["id"] = key; // minisearch requires a unique for each object - return x; + if (meta?.docstringscollapsed) { + $("#documenter-article-toggle-button").trigger({ + type: "click", + noToggleAnimation: true, + }); + } }); -// list below is the lunr 2.1.3 list minus the intersect with names(Base) -// (all, any, get, in, is, only, which) and (do, else, for, let, where, while, with) -// ideally we'd just filter the original list but it's not available as a variable -const stopWords = new Set([ - "a", - "able", - "about", - "across", - "after", - "almost", - "also", - "am", - "among", - "an", - "and", - "are", - "as", - "at", - "be", - "because", - "been", - "but", - "by", - "can", - "cannot", - "could", - "dear", - "did", - "does", - "either", - "ever", - "every", - "from", - "got", - "had", - "has", - "have", - "he", - "her", - "hers", - "him", - "his", - "how", - "however", - "i", - "if", - "into", - "it", - "its", - "just", - "least", - "like", - "likely", - "may", - "me", - "might", - "most", - "must", - "my", - "neither", - "no", - "nor", - "not", - "of", - "off", - "often", - "on", - "or", - "other", - "our", - "own", - "rather", - "said", - "say", - "says", - "she", - "should", - "since", - "so", - "some", - "than", - "that", - "the", - "their", - "them", - "then", - "there", - "these", - "they", - "this", - "tis", - "to", - "too", - "twas", - "us", - "wants", - "was", - "we", - "were", - "what", - "when", - "who", - "whom", - "why", - "will", - "would", - "yet", - "you", - "your", -]); - -let index = new minisearch({ - fields: ["title", "text"], // fields to index for full-text search - storeFields: ["location", "title", "text", "category", "page"], // fields to return with search results - processTerm: (term) => { - let word = stopWords.has(term) ? null : term; - if (word) { - // custom trimmer that doesn't strip @ and !, which are used in julia macro and function names - word = word - .replace(/^[^a-zA-Z0-9@!]+/, "") - .replace(/[^a-zA-Z0-9@!]+$/, ""); - } +}) +//////////////////////////////////////////////////////////////////////////////// +require(['jquery'], function($) { - return word ?? null; - }, - // add . as a separator, because otherwise "title": "Documenter.Anchors.add!", would not find anything if searching for "add!", only for the entire qualification - tokenize: (string) => string.split(/[\s\-\.]+/), - // options which will be applied during the search - searchOptions: { - boost: { title: 100 }, - fuzzy: 2, +/* +To get an in-depth about the thought process you can refer: https://hetarth02.hashnode.dev/series/gsoc + +PSEUDOCODE: + +Searching happens automatically as the user types or adjusts the selected filters. +To preserve responsiveness, as much as possible of the slow parts of the search are done +in a web worker. Searching and result generation are done in the worker, and filtering and +DOM updates are done in the main thread. The filters are in the main thread as they should +be very quick to apply. 
This lets filters be changed without re-searching with minisearch +(which is possible even if filtering is on the worker thread) and also lets filters be +changed _while_ the worker is searching and without message passing (neither of which are +possible if filtering is on the worker thread) + +SEARCH WORKER: + +Import minisearch + +Build index + +On message from main thread + run search + find the first 200 unique results from each category, and compute their divs for display + note that this is necessary and sufficient information for the main thread to find the + first 200 unique results from any given filter set + post results to main thread + +MAIN: + +Launch worker + +Declare nonconstant globals (worker_is_running, last_search_text, unfiltered_results) + +On text update + if worker is not running, launch_search() + +launch_search + set worker_is_running to true, set last_search_text to the search text + post the search query to worker + +on message from worker + if last_search_text is not the same as the text in the search field, + the latest search result is not reflective of the latest search query, so update again + launch_search() + otherwise + set worker_is_running to false + + regardless, display the new search results to the user + save the unfiltered_results as a global + update_search() + +on filter click + adjust the filter selection + update_search() + +update_search + apply search filters by looping through the unfiltered_results and finding the first 200 + unique results that match the filters + + Update the DOM +*/ + +/////// SEARCH WORKER /////// + +function worker_function(documenterSearchIndex, documenterBaseURL, filters) { + importScripts( + "https://cdn.jsdelivr.net/npm/minisearch@6.1.0/dist/umd/index.min.js" + ); + + let data = documenterSearchIndex.map((x, key) => { + x["id"] = key; // minisearch requires a unique for each object + return x; + }); + + // list below is the lunr 2.1.3 list minus the intersect with names(Base) + // (all, any, get, in, is, only, which) and (do, else, for, let, where, while, with) + // ideally we'd just filter the original list but it's not available as a variable + const stopWords = new Set([ + "a", + "able", + "about", + "across", + "after", + "almost", + "also", + "am", + "among", + "an", + "and", + "are", + "as", + "at", + "be", + "because", + "been", + "but", + "by", + "can", + "cannot", + "could", + "dear", + "did", + "does", + "either", + "ever", + "every", + "from", + "got", + "had", + "has", + "have", + "he", + "her", + "hers", + "him", + "his", + "how", + "however", + "i", + "if", + "into", + "it", + "its", + "just", + "least", + "like", + "likely", + "may", + "me", + "might", + "most", + "must", + "my", + "neither", + "no", + "nor", + "not", + "of", + "off", + "often", + "on", + "or", + "other", + "our", + "own", + "rather", + "said", + "say", + "says", + "she", + "should", + "since", + "so", + "some", + "than", + "that", + "the", + "their", + "them", + "then", + "there", + "these", + "they", + "this", + "tis", + "to", + "too", + "twas", + "us", + "wants", + "was", + "we", + "were", + "what", + "when", + "who", + "whom", + "why", + "will", + "would", + "yet", + "you", + "your", + ]); + + let index = new MiniSearch({ + fields: ["title", "text"], // fields to index for full-text search + storeFields: ["location", "title", "text", "category", "page"], // fields to return with results processTerm: (term) => { let word = stopWords.has(term) ? 
null : term; if (word) { + // custom trimmer that doesn't strip @ and !, which are used in julia macro and function names word = word .replace(/^[^a-zA-Z0-9@!]+/, "") .replace(/[^a-zA-Z0-9@!]+$/, ""); + + word = word.toLowerCase(); } return word ?? null; }, + // add . as a separator, because otherwise "title": "Documenter.Anchors.add!", would not + // find anything if searching for "add!", only for the entire qualification tokenize: (string) => string.split(/[\s\-\.]+/), - }, -}); + // options which will be applied during the search + searchOptions: { + prefix: true, + boost: { title: 100 }, + fuzzy: 2, + }, + }); + + index.addAll(data); + + /** + * Used to map characters to HTML entities. + * Refer: https://github.com/lodash/lodash/blob/main/src/escape.ts + */ + const htmlEscapes = { + "&": "&", + "<": "<", + ">": ">", + '"': """, + "'": "'", + }; + + /** + * Used to match HTML entities and HTML characters. + * Refer: https://github.com/lodash/lodash/blob/main/src/escape.ts + */ + const reUnescapedHtml = /[&<>"']/g; + const reHasUnescapedHtml = RegExp(reUnescapedHtml.source); + + /** + * Escape function from lodash + * Refer: https://github.com/lodash/lodash/blob/main/src/escape.ts + */ + function escape(string) { + return string && reHasUnescapedHtml.test(string) + ? string.replace(reUnescapedHtml, (chr) => htmlEscapes[chr]) + : string || ""; + } + + /** + * Make the result component given a minisearch result data object and the value + * of the search input as queryString. To view the result object structure, refer: + * https://lucaong.github.io/minisearch/modules/_minisearch_.html#searchresult + * + * @param {object} result + * @param {string} querystring + * @returns string + */ + function make_search_result(result, querystring) { + let search_divider = `
`; + let display_link = + result.location.slice(Math.max(0), Math.min(50, result.location.length)) + + (result.location.length > 30 ? "..." : ""); // To cut-off the link because it messes with the overflow of the whole div -index.addAll(data); + if (result.page !== "") { + display_link += ` (${result.page})`; + } + + let textindex = new RegExp(`${querystring}`, "i").exec(result.text); + let text = + textindex !== null + ? result.text.slice( + Math.max(textindex.index - 100, 0), + Math.min( + textindex.index + querystring.length + 100, + result.text.length + ) + ) + : ""; // cut-off text before and after from the match + + text = text.length ? escape(text) : ""; + + let display_result = text.length + ? "..." + + text.replace( + new RegExp(`${escape(querystring)}`, "i"), // For first occurrence + '$&' + ) + + "..." + : ""; // highlights the match + + let in_code = false; + if (!["page", "section"].includes(result.category.toLowerCase())) { + in_code = true; + } + + // We encode the full url to escape some special characters which can lead to broken links + let result_div = ` + +
+
${escape(result.title)}
+
${result.category}
+
+

+ ${display_result} +

+
+ ${display_link} +
+
+ ${search_divider} + `; + + return result_div; + } + + self.onmessage = function (e) { + let query = e.data; + let results = index.search(query, { + filter: (result) => { + // Only return relevant results + return result.score >= 1; + }, + }); -let filters = [...new Set(data.map((x) => x.category))]; -var modal_filters = make_modal_body_filters(filters); -var filter_results = []; + // Pre-filter to deduplicate and limit to 200 per category to the extent + // possible without knowing what the filters are. + let filtered_results = []; + let counts = {}; + for (let filter of filters) { + counts[filter] = 0; + } + let present = {}; + + for (let result of results) { + cat = result.category; + cnt = counts[cat]; + if (cnt < 200) { + id = cat + "---" + result.location; + if (present[id]) { + continue; + } + present[id] = true; + filtered_results.push({ + location: result.location, + category: cat, + div: make_search_result(result, query), + }); + } + } + + postMessage(filtered_results); + }; +} -$(document).on("keyup", ".documenter-search-input", function (event) { - // Adding a debounce to prevent disruptions from super-speed typing! - debounce(() => update_search(filter_results), 300); +// `worker = Threads.@spawn worker_function(documenterSearchIndex)`, but in JavaScript! +const filters = [ + ...new Set(documenterSearchIndex["docs"].map((x) => x.category)), +]; +const worker_str = + "(" + + worker_function.toString() + + ")(" + + JSON.stringify(documenterSearchIndex["docs"]) + + "," + + JSON.stringify(documenterBaseURL) + + "," + + JSON.stringify(filters) + + ")"; +const worker_blob = new Blob([worker_str], { type: "text/javascript" }); +const worker = new Worker(URL.createObjectURL(worker_blob)); + +/////// SEARCH MAIN /////// + +// Whether the worker is currently handling a search. This is a boolean +// as the worker only ever handles 1 or 0 searches at a time. +var worker_is_running = false; + +// The last search text that was sent to the worker. This is used to determine +// if the worker should be launched again when it reports back results. +var last_search_text = ""; + +// The results of the last search. This, in combination with the state of the filters +// in the DOM, is used compute the results to display on calls to update_search. +var unfiltered_results = []; + +// Which filter is currently selected +var selected_filter = ""; + +$(document).on("input", ".documenter-search-input", function (event) { + if (!worker_is_running) { + launch_search(); + } }); +function launch_search() { + worker_is_running = true; + last_search_text = $(".documenter-search-input").val(); + worker.postMessage(last_search_text); +} + +worker.onmessage = function (e) { + if (last_search_text !== $(".documenter-search-input").val()) { + launch_search(); + } else { + worker_is_running = false; + } + + unfiltered_results = e.data; + update_search(); +}; + $(document).on("click", ".search-filter", function () { if ($(this).hasClass("search-filter-selected")) { - $(this).removeClass("search-filter-selected"); + selected_filter = ""; } else { - $(this).addClass("search-filter-selected"); + selected_filter = $(this).text().toLowerCase(); } - // Adding a debounce to prevent disruptions from crazy clicking! 
- debounce(() => get_filters(), 300); + // This updates search results and toggles classes for UI: + update_search(); }); -/** - * A debounce function, takes a function and an optional timeout in milliseconds - * - * @function callback - * @param {number} timeout - */ -function debounce(callback, timeout = 300) { - clearTimeout(timer); - timer = setTimeout(callback, timeout); -} - /** * Make/Update the search component - * - * @param {string[]} selected_filters */ -function update_search(selected_filters = []) { - let initial_search_body = ` -
Type something to get started!
- `; - +function update_search() { let querystring = $(".documenter-search-input").val(); if (querystring.trim()) { - results = index.search(querystring, { - filter: (result) => { - // Filtering results - if (selected_filters.length === 0) { - return result.score >= 1; - } else { - return ( - result.score >= 1 && selected_filters.includes(result.category) - ); - } - }, - }); + if (selected_filter == "") { + results = unfiltered_results; + } else { + results = unfiltered_results.filter((result) => { + return selected_filter == result.category.toLowerCase(); + }); + } let search_result_container = ``; + let modal_filters = make_modal_body_filters(); let search_divider = `
`; if (results.length) { @@ -455,19 +696,23 @@ function update_search(selected_filters = []) { let count = 0; let search_results = ""; - results.forEach(function (result) { - if (result.location) { - // Checking for duplication of results for the same page - if (!links.includes(result.location)) { - search_results += make_search_result(result, querystring); - count++; - } - + for (var i = 0, n = results.length; i < n && count < 200; ++i) { + let result = results[i]; + if (result.location && !links.includes(result.location)) { + search_results += result.div; + count++; links.push(result.location); } - }); + } - let result_count = `
${count} result(s)
`; + if (count == 1) { + count_str = "1 result"; + } else if (count == 200) { + count_str = "200+ results"; + } else { + count_str = count + " results"; + } + let result_count = `
${count_str}
`; search_result_container = `
@@ -496,125 +741,37 @@ function update_search(selected_filters = []) { $(".search-modal-card-body").html(search_result_container); } else { - filter_results = []; - modal_filters = make_modal_body_filters(filters, filter_results); - if (!$(".search-modal-card-body").hasClass("is-justify-content-center")) { $(".search-modal-card-body").addClass("is-justify-content-center"); } - $(".search-modal-card-body").html(initial_search_body); + $(".search-modal-card-body").html(` +
Type something to get started!
+ `); } } /** * Make the modal filter html * - * @param {string[]} filters - * @param {string[]} selected_filters * @returns string */ -function make_modal_body_filters(filters, selected_filters = []) { - let str = ``; - - filters.forEach((val) => { - if (selected_filters.includes(val)) { - str += `${val}`; - } else { - str += `${val}`; - } - }); +function make_modal_body_filters() { + let str = filters + .map((val) => { + if (selected_filter == val.toLowerCase()) { + return `${val}`; + } else { + return `${val}`; + } + }) + .join(""); - let filter_html = ` + return `
Filters: ${str} -
- `; - - return filter_html; -} - -/** - * Make the result component given a minisearch result data object and the value of the search input as queryString. - * To view the result object structure, refer: https://lucaong.github.io/minisearch/modules/_minisearch_.html#searchresult - * - * @param {object} result - * @param {string} querystring - * @returns string - */ -function make_search_result(result, querystring) { - let search_divider = `
`; - let display_link = - result.location.slice(Math.max(0), Math.min(50, result.location.length)) + - (result.location.length > 30 ? "..." : ""); // To cut-off the link because it messes with the overflow of the whole div - - if (result.page !== "") { - display_link += ` (${result.page})`; - } - - let textindex = new RegExp(`\\b${querystring}\\b`, "i").exec(result.text); - let text = - textindex !== null - ? result.text.slice( - Math.max(textindex.index - 100, 0), - Math.min( - textindex.index + querystring.length + 100, - result.text.length - ) - ) - : ""; // cut-off text before and after from the match - - let display_result = text.length - ? "..." + - text.replace( - new RegExp(`\\b${querystring}\\b`, "i"), // For first occurrence - '$&' - ) + - "..." - : ""; // highlights the match - - let in_code = false; - if (!["page", "section"].includes(result.category.toLowerCase())) { - in_code = true; - } - - // We encode the full url to escape some special characters which can lead to broken links - let result_div = ` - -
-
${result.title}
-
${result.category}
-
-

- ${display_result} -

-
- ${display_link} -
-
- ${search_divider} - `; - - return result_div; -} - -/** - * Get selected filters, remake the filter html and lastly update the search modal - */ -function get_filters() { - let ele = $(".search-filters .search-filter-selected").get(); - filter_results = ele.map((x) => $(x).text().toLowerCase()); - modal_filters = make_modal_body_filters(filters, filter_results); - update_search(filter_results); +
`; } }) @@ -641,103 +798,107 @@ $(document).ready(function () { //////////////////////////////////////////////////////////////////////////////// require(['jquery'], function($) { -let search_modal_header = ` - -`; - -let initial_search_body = ` -
Type something to get started!
-`; - -let search_modal_footer = ` - -`; - -$(document.body).append( - ` - +\end{equation}\]

Together with the property $\vec u_\ell^\mathrm{T} \vec u_{\ell'} \approx \delta_{\ell\ell'} \approx \vec v_\ell^\mathrm{T} \vec v_{\ell'}$, we have successfully translated the original SVE problem into an SVD, because

\[ K = \sum_\ell S_\ell \vec u_\ell \vec v_\ell^\mathrm{T}.\]

  • The next step is calling the matrices function which computes the matrix $K$ derived in the previous step.

    Note

    The function is named in the plural because in the centrosymmetric case it actually returns two matrices $K_+$ and $K_-$, one for the even and one for the odd kernel. These matrices' SVDs are later concatenated, so for simplicity, we will refer to $K$ from here on out.

    Info

    Special care is taken here to avoid FP-arithmetic cancellation around $x = -1$ and $x = +1$.

    Kernel matrices (figure). Note that in the plot the matrices are rotated 90 degrees to the left to make the connection with the (subregion $[0, 1] × [0, 1]$ of the) previous figure more obvious. Thus we can see how the choice of sampling points has magnified the regions of interest and brought them to the matrices' centers. Furthermore, elements with absolute values smaller than $10\%$ of the maximum have been omitted to emphasize the structure; this should, however, not be taken to mean that there is any sparsity to speak of that we could exploit in the next step.

  • Take the truncated singular value decomposition (TSVD) of $K$, or rather, of $K_+$ and $K_-$. We use here a custom TSVD routine written by Markus Wallerberger, which combines a homemade rank-revealing QR decomposition with GenericLinearAlgebra.svd!. This is necessary because there is currently no TSVD for arbitrary types available.

  • Via the function truncate, we throw away superfluous terms in our expansion. More specifically, we choose $L$ in \eqref{SVE} such that $S_\ell / S_0 > \varepsilon$ for all $\ell \leq L$. Here $\varepsilon$ is our selected precision, in our case it's equal to the double precision machine epsilon, $2^{-52} \approx 2.22 \times 10^{-16}$.
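  A minimal Float64 sketch of this truncation criterion, using a toy kernel matrix and LinearAlgebra.svd (the library itself works in higher precision and uses the custom TSVD routine mentioned above):

    using LinearAlgebra

    # Toy discretized kernel standing in for K; the real matrix comes from the
    # piecewise Legendre collocation described above.
    K = [exp(-x * y) for x in range(0, 1; length=32), y in range(0, 1; length=32)]
    F = svd(K)
    ε = 2.0^-52                                     # double precision machine epsilon
    L = findlast(s -> s / first(F.S) > ε, F.S)      # largest L with S_L / S_0 > ε
    u, s, v = F.U[:, 1:L], F.S[1:L], F.V[:, 1:L]    # truncated expansion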

  • diff --git a/dev/index.html b/dev/index.html index a5c5910..6442454 100644 --- a/dev/index.html +++ b/dev/index.html @@ -1,2 +1,2 @@ -Home · SparseIR.jl
    +Home · SparseIR.jl
    diff --git a/dev/objects.inv b/dev/objects.inv new file mode 100644 index 0000000..dfe04fb Binary files /dev/null and b/dev/objects.inv differ diff --git a/dev/private/index.html b/dev/private/index.html index ae63912..88db1bd 100644 --- a/dev/private/index.html +++ b/dev/private/index.html @@ -1,23 +1,23 @@ -Private · SparseIR.jl

    Private names index

    These are not considered API and therefore not covered by any semver promises.

    Core.IntMethod

    Get prefactor n for the Matsubara frequency ω = n*π/β

    source
    Core.UnionMethod
    (polyFT::PiecewiseLegendreFT)(ω)

    Obtain Fourier transform of polynomial for given MatsubaraFreq ω.

    source
    SparseIR.AbstractAugmentationType
    AbstractAugmentation

    Scalar function in imaginary time/frequency.

    This represents a single function in imaginary time and frequency, together with some auxiliary methods that make it suitable for augmenting a basis.

    See also: AugmentedBasis

    source
    SparseIR.AbstractBasisType
    AbstractBasis

    Abstract base class for bases on the imaginary-time axis.

    Let basis be an abstract basis. Then we can expand a two-point propagator G(τ), where τ is imaginary time, into a set of basis functions:

    G(τ) == sum(basis.u[l](τ) * g[l] for l in 1:length(basis)) + ϵ(τ),

    where basis.u[l] is the l-th basis function, g[l] is the associated expansion coefficient and ϵ(τ) is an error term. Similarly, the Fourier transform Ĝ(n), where n is now a Matsubara frequency, can be expanded as follows:

    Ĝ(n) == sum(basis.uhat[l](n) * g[l] for l in 1:length(basis)) + ϵ(n),

    where basis.uhat[l] is now the Fourier transform of the basis function.

    source
    SparseIR.AbstractKernelType
    (kernel::AbstractKernel)(x, y[, x₊, x₋])

    Evaluate kernel at point (x, y).

    The parameters x₊ and x₋, if given, shall contain the values of x - xₘᵢₙ and xₘₐₓ - x, respectively. This is useful if either difference is to be formed and cancellation is expected.

    source
    SparseIR.AbstractKernelType
    AbstractKernel

    Integral kernel K(x, y).

    Abstract base type for an integral kernel, i.e. an AbstractFloat binary function $K(x, y)$ used in a Fredholm integral equation of the first kind:

    \[ u(x) = ∫ K(x, y) v(y) dy\]

    where $x ∈ [x_\mathrm{min}, x_\mathrm{max}]$ and $y ∈ [y_\mathrm{min}, y_\mathrm{max}]$. For its SVE to exist, the kernel must be square-integrable; for its singular values to decay exponentially, it must be smooth.

    In general, the kernel is applied to a scaled spectral function $ρ'(y)$ as:

    \[ ∫ K(x, y) ρ'(y) dy,\]

    where $ρ'(y) = w(y) ρ(y)$.

    source
    SparseIR.AbstractSamplingType
    AbstractSampling

    Abstract type for sparse sampling.

    Encodes the "basis transformation" of a propagator from the truncated IR basis coefficients G_ir[l] to time/frequency sampled on sparse points G(x[i]) together with its inverse, a least squares fit:

         ________________                   ___________________
    +Private · SparseIR.jl

    Private names index

    These are not considered API and therefore not covered by any semver promises.

    Core.IntMethod

    Get prefactor n for the Matsubara frequency ω = n*π/β

    source
    Core.UnionMethod
    (polyFT::PiecewiseLegendreFT)(ω)

    Obtain Fourier transform of polynomial for given MatsubaraFreq ω.

    source
    SparseIR.AbstractAugmentationType
    AbstractAugmentation

    Scalar function in imaginary time/frequency.

    This represents a single function in imaginary time and frequency, together with some auxiliary methods that make it suitable for augmenting a basis.

    See also: AugmentedBasis

    source
    SparseIR.AbstractBasisType
    AbstractBasis

    Abstract base class for bases on the imaginary-time axis.

    Let basis be an abstract basis. Then we can expand a two-point propagator G(τ), where τ is imaginary time, into a set of basis functions:

    G(τ) == sum(basis.u[l](τ) * g[l] for l in 1:length(basis)) + ϵ(τ),

    where basis.u[l] is the l-th basis function, g[l] is the associated expansion coefficient and ϵ(τ) is an error term. Similarly, the Fourier transform Ĝ(n), where n is now a Matsubara frequency, can be expanded as follows:

    Ĝ(n) == sum(basis.uhat[l](n) * g[l] for l in 1:length(basis)) + ϵ(n),

    where basis.uhat[l] is now the Fourier transform of the basis function.

    source
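    As an illustration of this expansion (a sketch, not part of the documented API), the sums above can be evaluated directly from a coefficient vector g:

    # Evaluate the imaginary-time expansion from the docstring at a point τ;
    # `basis` is any AbstractBasis and `g` holds one coefficient per basis function.
    evaluate_G(basis, g, τ) = sum(basis.u[l](τ) * g[l] for l in 1:length(basis))

    # Matsubara-frequency analogue using the Fourier-transformed basis functions.
    evaluate_Ĝ(basis, g, n) = sum(basis.uhat[l](n) * g[l] for l in 1:length(basis))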
    SparseIR.AbstractKernelType
    (kernel::AbstractKernel)(x, y[, x₊, x₋])

    Evaluate kernel at point (x, y).

    The parameters x₊ and x₋, if given, shall contain the values of x - xₘᵢₙ and xₘₐₓ - x, respectively. This is useful if either difference is to be formed and cancellation is expected.

    source
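    For illustration, a kernel can be evaluated directly; this sketch assumes the LogisticKernel constructor and the $[-1, 1] × [-1, 1]$ domain described in the kernel docstrings below:

    using SparseIR

    K = LogisticKernel(42.0)          # Λ = β * ωmax
    K(0.5, 0.25)                      # kernel value at (x, y)
    K(1.0, 0.25, 2.0, 0.0)            # same y at x = 1, passing x₊ = x - xₘᵢₙ = 2 and x₋ = xₘₐₓ - x = 0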
    SparseIR.AbstractKernelType
    AbstractKernel

    Integral kernel K(x, y).

    Abstract base type for an integral kernel, i.e. an AbstractFloat binary function $K(x, y)$ used in a Fredholm integral equation of the first kind:

    \[ u(x) = ∫ K(x, y) v(y) dy\]

    where $x ∈ [x_\mathrm{min}, x_\mathrm{max}]$ and $y ∈ [y_\mathrm{min}, y_\mathrm{max}]$. For its SVE to exist, the kernel must be square-integrable; for its singular values to decay exponentially, it must be smooth.

    In general, the kernel is applied to a scaled spectral function $ρ'(y)$ as:

    \[ ∫ K(x, y) ρ'(y) dy,\]

    where $ρ'(y) = w(y) ρ(y)$.

    source
    SparseIR.AbstractSamplingType
    AbstractSampling

    Abstract type for sparse sampling.

    Encodes the "basis transformation" of a propagator from the truncated IR basis coefficients G_ir[l] to time/frequency sampled on sparse points G(x[i]) together with its inverse, a least squares fit:

         ________________                   ___________________
         |                |    evaluate     |                   |
         |     Basis      |---------------->|     Value on      |
         |  coefficients  |<----------------|  sampling points  |
    -    |________________|      fit        |___________________|
    source
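    A sketch of the evaluate/fit round trip pictured above. It assumes the exported TauSampling constructor together with evaluate and fit from the sampling interface; finite_temp_bases is documented further down this page:

    using SparseIR

    basis_f, basis_b = SparseIR.finite_temp_bases(10.0, 8.0, 1.0e-6)   # β, ωmax, ε
    smpl = TauSampling(basis_f)           # sparse imaginary-time sampling points
    gl   = randn(length(basis_f))         # some IR coefficients
    gτ   = evaluate(smpl, gl)             # coefficients -> values on sampling points
    gl2  = fit(smpl, gτ)                  # least-squares fit back to coefficients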
    SparseIR.CentrosymmSVEType
    CentrosymmSVE <: AbstractSVE

    SVE of centrosymmetric kernel in block-diagonal (even/odd) basis.

    For a centrosymmetric kernel K, i.e., a kernel satisfying: K(x, y) == K(-x, -y), one can make the following ansatz for the singular functions:

    u[l](x) = ured[l](x) + sign[l] * ured[l](-x)
    +    |________________|      fit        |___________________|
    source
    SparseIR.CentrosymmSVEType
    CentrosymmSVE <: AbstractSVE

    SVE of centrosymmetric kernel in block-diagonal (even/odd) basis.

    For a centrosymmetric kernel K, i.e., a kernel satisfying: K(x, y) == K(-x, -y), one can make the following ansatz for the singular functions:

    u[l](x) = ured[l](x) + sign[l] * ured[l](-x)
     v[l](y) = vred[l](y) + sign[l] * vred[l](-y)

    where sign[l] is either +1 or -1. This means that the singular value expansion can be block-diagonalized into an even and an odd part by (anti-)symmetrizing the kernel:

    K_even = K(x, y) + K(x, -y)
    -K_odd  = K(x, y) - K(x, -y)

    The lth basis function, restricted to the positive interval, is then the singular function of one of these kernels. If the kernel generates a Chebyshev system [1], then even and odd basis functions alternate.

    [1]: A. Karlin, Total Positivity (1968).

    source
    SparseIR.LogisticKernelOddType
    LogisticKernelOdd <: AbstractReducedKernel

    Fermionic analytical continuation kernel, odd.

    In dimensionless variables $x = 2τ/β - 1$, $y = βω/Λ$, the fermionic integral kernel is a function on $[-1, 1] × [-1, 1]$:

    \[ K(x, y) = -\frac{\sinh(Λ x y / 2)}{\cosh(Λ y / 2)}\]

    source
    SparseIR.PiecewiseLegendreFTType
    PiecewiseLegendreFT <: Function

    Fourier transform of a piecewise Legendre polynomial.

    For a given frequency index n, the Fourier transform of the Legendre function is defined as:

        p̂(n) == ∫ dx exp(im * π * n * x / (xmax - xmin)) p(x)

    The polynomial is continued either periodically (freq=:even), in which case n must be even, or antiperiodically (freq=:odd), in which case n must be odd.

    source
    SparseIR.PiecewiseLegendrePolyType
    PiecewiseLegendrePoly <: Function

    Piecewise Legendre polynomial.

    Models a function on the interval $[xmin, xmax]$ as a set of segments on the intervals $S[i] = [a[i], a[i+1]]$, where on each interval the function is expanded in scaled Legendre polynomials.

    source
    SparseIR.PowerModelType
    PowerModel

    Model from a high-frequency series expansion:

    A(iω) == sum(A[n] / (iω)^(n+1) for n in 1:N)

    where $iω == i * π/2 * wn$ is a reduced imaginary frequency, i.e., $wn$ is an odd/even number for fermionic/bosonic frequencies.

    source
    SparseIR.ReducedKernelType
    ReducedKernel

    Restriction of centrosymmetric kernel to positive interval.

    For a kernel $K$ on $[-1, 1] × [-1, 1]$ that is centrosymmetric, i.e. $K(x, y) = K(-x, -y)$, it is straightforward to show that the left/right singular vectors can be chosen as either odd or even functions.

    Consequentially, they are singular functions of a reduced kernel $K_\mathrm{red}$ on $[0, 1] × [0, 1]$ that is given as either:

    \[ K_\mathrm{red}(x, y) = K(x, y) \pm K(x, -y)\]

    This kernel is what this type represents. The full singular functions can be reconstructed by (anti-)symmetrically continuing them to the negative axis.

    source
    SparseIR.RegularizedBoseKernelOddType
    RegularizedBoseKernelOdd <: AbstractReducedKernel

    Bosonic analytical continuation kernel, odd.

    In dimensionless variables $x = 2 τ / β - 1$, $y = β ω / Λ$, the fermionic integral kernel is a function on $[-1, 1] × [-1, 1]$:

    \[ K(x, y) = -y \frac{\sinh(Λ x y / 2)}{\sinh(Λ y / 2)}\]

    source
    SparseIR.RuleType
    Rule{T<:AbstractFloat}

    Quadrature rule.

    Approximation of an integral over [a, b] by a sum over discrete points x with weights w:

    \[ ∫ f(x) ω(x) dx ≈ ∑_i f(x_i) w_i\]

    where we generally have superexponential convergence for smooth $f(x)$ in the number of quadrature points.

    source
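    A minimal sketch of the quadrature sum above; the field names x and w for the nodes and weights are an assumption not stated on this page:

    # Apply a Rule to an integrand f, assuming nodes in `rule.x` and weights in `rule.w`.
    quadsum(rule, f) = sum(f(xi) * wi for (xi, wi) in zip(rule.x, rule.w))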
    SparseIR.SVEResultMethod
    SVEResult(kernel::AbstractKernel;
    +K_odd  = K(x, y) - K(x, -y)

    The lth basis function, restricted to the positive interval, is then the singular function of one of these kernels. If the kernel generates a Chebyshev system [1], then even and odd basis functions alternate.

    [1]: A. Karlin, Total Positivity (1968).

    source
    SparseIR.LogisticKernelOddType
    LogisticKernelOdd <: AbstractReducedKernel

    Fermionic analytical continuation kernel, odd.

    In dimensionless variables $x = 2τ/β - 1$, $y = βω/Λ$, the fermionic integral kernel is a function on $[-1, 1] × [-1, 1]$:

    \[ K(x, y) = -\frac{\sinh(Λ x y / 2)}{\cosh(Λ y / 2)}\]

    source
    SparseIR.PiecewiseLegendreFTType
    PiecewiseLegendreFT <: Function

    Fourier transform of a piecewise Legendre polynomial.

    For a given frequency index n, the Fourier transform of the Legendre function is defined as:

        p̂(n) == ∫ dx exp(im * π * n * x / (xmax - xmin)) p(x)

    The polynomial is continued either periodically (freq=:even), in which case n must be even, or antiperiodically (freq=:odd), in which case n must be odd.

    source
    SparseIR.PiecewiseLegendrePolyType
    PiecewiseLegendrePoly <: Function

    Piecewise Legendre polynomial.

    Models a function on the interval $[xmin, xmax]$ as a set of segments on the intervals $S[i] = [a[i], a[i+1]]$, where on each interval the function is expanded in scaled Legendre polynomials.

    source
    SparseIR.PowerModelType
    PowerModel

    Model from a high-frequency series expansion:

    A(iω) == sum(A[n] / (iω)^(n+1) for n in 1:N)

    where $iω == i * π/2 * wn$ is a reduced imaginary frequency, i.e., $wn$ is an odd/even number for fermionic/bosonic frequencies.

    source
    SparseIR.ReducedKernelType
    ReducedKernel

    Restriction of centrosymmetric kernel to positive interval.

    For a kernel $K$ on $[-1, 1] × [-1, 1]$ that is centrosymmetric, i.e. $K(x, y) = K(-x, -y)$, it is straightforward to show that the left/right singular vectors can be chosen as either odd or even functions.

    Consequentially, they are singular functions of a reduced kernel $K_\mathrm{red}$ on $[0, 1] × [0, 1]$ that is given as either:

    \[ K_\mathrm{red}(x, y) = K(x, y) \pm K(x, -y)\]

    This kernel is what this type represents. The full singular functions can be reconstructed by (anti-)symmetrically continuing them to the negative axis.

    source
    SparseIR.RegularizedBoseKernelOddType
    RegularizedBoseKernelOdd <: AbstractReducedKernel

    Bosonic analytical continuation kernel, odd.

    In dimensionless variables $x = 2 τ / β - 1$, $y = β ω / Λ$, the fermionic integral kernel is a function on $[-1, 1] × [-1, 1]$:

    \[ K(x, y) = -y \frac{\sinh(Λ x y / 2)}{\sinh(Λ y / 2)}\]

    source
    SparseIR.RuleType
    Rule{T<:AbstractFloat}

    Quadrature rule.

    Approximation of an integral over [a, b] by a sum over discrete points x with weights w:

    \[ ∫ f(x) ω(x) dx ≈ ∑_i f(x_i) w_i\]

    where we generally have superexponential convergence for smooth $f(x)$ in the number of quadrature points.

    source
    SparseIR.SVEResultMethod
    SVEResult(kernel::AbstractKernel;
         Twork=nothing, ε=nothing, lmax=typemax(Int),
         n_gauss=nothing, svd_strat=:auto,
         sve_strat=iscentrosymmetric(kernel) ? CentrosymmSVE : SamplingSVE
    -)

    Perform truncated singular value expansion of a kernel.

    Perform a truncated singular value expansion (SVE) of an integral kernel kernel : [xmin, xmax] x [ymin, ymax] -> ℝ:

    kernel(x, y) == sum(s[l] * u[l](x) * v[l](y) for l in (1, 2, 3, ...)),

    where s[l] are the singular values, which are ordered in non-increasing fashion, u[l](x) are the left singular functions, which form an orthonormal system on [xmin, xmax], and v[l](y) are the right singular functions, which form an orthonormal system on [ymin, ymax].

    The SVE is mapped onto the singular value decomposition (SVD) of a matrix by expanding the kernel in piecewise Legendre polynomials (by default by using a collocation).

    Arguments

    • K::AbstractKernel: Integral kernel to take SVE from.

    • ε::Real: Accuracy target for the basis: attempt to have singular values down to a relative magnitude of ε, and have each singular value and singular vector be accurate to ε. A Twork with a machine epsilon of ε^2 or lower is required to satisfy this. Defaults to 2.2e-16 if xprec is available, and 1.5e-8 otherwise.

    • cutoff::Real: Relative cutoff for the singular values. A Twork with machine epsilon of cutoff is required to satisfy this. Defaults to a small multiple of the machine epsilon.

      Note that cutoff and ε serve distinct purposes. cutoff represents the accuracy to which the kernel is reproduced, whereas ε is the accuracy to which the singular values and vectors are guaranteed.

    • lmax::Integer: Maximum basis size. If given, only at most the lmax most significant singular values and associated singular functions are returned.

    • n_gauss::Integer: Order of Legendre polynomials. Defaults to the value hinted by the kernel.

    • Twork: Working data type. Defaults to a data type with machine epsilon of at most ε^2 and at most cutoff, or otherwise the most accurate data type available.

    • sve_strat::AbstractSVE: SVE to SVD translation strategy. Defaults to SamplingSVE, optionally wrapped inside of a CentrosymmSVE if the kernel is centrosymmetric.

    • svd_strat ('fast' or 'default' or 'accurate'): SVD solver. Defaults to a fast (ID/RRQR) based solution when accuracy goals are moderate, and to a more accurate Jacobi-based algorithm otherwise.

    Returns: An SVEResult containing the truncated singular value expansion.

    source
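    A usage sketch built only from the signatures documented on this page (LogisticKernel, SVEResult and finite_temp_bases); the numerical values are arbitrary:

    using SparseIR

    kernel = LogisticKernel(80.0)                     # Λ = β * ωmax = 10 * 8
    sve    = SparseIR.SVEResult(kernel; ε=1.0e-8)     # truncated singular value expansion
    basis_f, basis_b = SparseIR.finite_temp_bases(10.0, 8.0, 1.0e-8; kernel, sve_result=sve)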
    SparseIR.SamplingSVEType
    SamplingSVE <: AbstractSVE

    SVE to SVD translation by sampling technique [1].

    Maps the singular value expansion (SVE) of a kernel kernel onto the singular value decomposition of a matrix A. This is achieved by choosing two sets of Gauss quadrature rules: (x, wx) and (y, wy) and approximating the integrals in the SVE equations by finite sums. This implies that the singular values of the SVE are well-approximated by the singular values of the following matrix:

    A[i, j] = √(wx[i]) * K(x[i], y[j]) * √(wy[j])

    and the values of the singular functions at the Gauss sampling points can be reconstructed from the singular vectors u and v as follows:

    u[l,i] ≈ √(wx[i]) u[l](x[i])
    -v[l,j] ≈ √(wy[j]) v[l](y[j])

    [1] P. Hansen, Discrete Inverse Problems, Ch. 3.1

    source
    SparseIR.accuracyFunction
    accuracy(basis::AbstractBasis)

    Accuracy of the basis.

    Upper bound on the relative error of representing a propagator with the given number of basis functions (a number between 0 and 1).

    source
    SparseIR.canonicalize!Method
    canonicalize!(u, v)

    Canonicalize basis.

    Each SVD (u[l], v[l]) pair is unique only up to a global phase, which may differ from implementation to implementation and from platform to platform. We fix that gauge by demanding u[l](1) > 0. This ensures a diffeomorphic connection to the Legendre polynomials as Λ → 0.

    source
    SparseIR.conv_radiusFunction
    conv_radius(kernel)

    Convergence radius of the Matsubara basis asymptotic model.

    For improved relative numerical accuracy, the IR basis functions on the Matsubara axis uhat(basis, n) can be evaluated from an asymptotic expression for abs(n) > conv_radius. If isinf(conv_radius), then the asymptotics are unused (the default).

    source
    SparseIR.default_matsubara_sampling_pointsFunction
    default_matsubara_sampling_points(basis::AbstractBasis; positive_only=false)

    Default sampling points on the imaginary frequency axis.

    Arguments

    • positive_only::Bool: Only return non-negative frequencies. This is useful if the object to be fitted is symmetric in Matsubara frequency, ĝ(ω) == conj(ĝ(-ω)), or, equivalently, real in imaginary time.
    source
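    For example (a sketch reusing finite_temp_bases from below):

    using SparseIR

    basis_f, _ = SparseIR.finite_temp_bases(10.0, 8.0, 1.0e-6)
    ωn = SparseIR.default_matsubara_sampling_points(basis_f; positive_only=true)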
    SparseIR.eval_matrixFunction
    eval_matrix(T, basis, x)

    Return evaluation matrix from coefficients to sampling points. T <: AbstractSampling.

    source
    SparseIR.find_extremaMethod
    find_extrema(polyFT::PiecewiseLegendreFT; part=nothing, grid=DEFAULT_GRID)

    Obtain extrema of Fourier-transformed polynomial.

    source
    SparseIR.finite_temp_basesFunction
    finite_temp_bases(β::Real, ωmax::Real, ε=nothing;
    -                  kernel=LogisticKernel(β * ωmax), sve_result=SVEResult(kernel; ε))

    Construct FiniteTempBasis objects for fermion and bosons using the same LogisticKernel instance.

    source
    SparseIR.from_IRFunction
    from_IR(dlr::DiscreteLehmannRepresentation, gl::AbstractArray, dims=1)

    From IR to DLR. gl: Expansion coefficients in IR.

    source
    SparseIR.get_symmetrizedMethod
    get_symmetrized(kernel, sign)

    Construct a symmetrized version of kernel, i.e. kernel(x, y) + sign * kernel(x, -y).

    Beware!

    By default, this returns a simple wrapper over the current instance which naively performs the sum. You may want to override this to avoid cancellation.

    source
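    A sketch, assuming the LogisticKernel constructor documented elsewhere on this page:

    using SparseIR

    K      = LogisticKernel(42.0)
    K_even = SparseIR.get_symmetrized(K, +1)   # K(x, y) + K(x, -y)
    K_odd  = SparseIR.get_symmetrized(K, -1)   # K(x, y) - K(x, -y)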
    SparseIR.get_tnlMethod
    get_tnl(l, w)

    Fourier integral of the l-th Legendre polynomial:

    Tₗ(ω) == ∫ dx exp(iωx) Pₗ(x)
    source
    SparseIR.giwMethod
    giw(polyFT, wn)

    Return model Green's function for reduced frequencies

    source
    SparseIR.iscentrosymmetricFunction
    iscentrosymmetric(kernel)

    Return true if kernel(x, y) == kernel(-x, -y) for all values of x and y in range. This allows the kernel to be block-diagonalized, speeding up the singular value expansion by a factor of 4. Defaults to false.

    source
    SparseIR.matop!Method
    matop!(buffer, mat, arr::AbstractArray, op, dim)

    Apply the operator op to the matrix mat and to the array arr along the first dimension (dim=1) or the last dimension (dim=N).

    source
    SparseIR.matop_along_dim!Method
    matop_along_dim!(buffer, mat, arr::AbstractArray, dim::Integer, op)

    Apply the operator op to the matrix mat and to the array arr along the dimension dim, writing the result to buffer.

    source
    SparseIR.movedimMethod
    movedim(arr::AbstractArray, src => dst)

    Move arr's dimension at src to dst while keeping the order of the remaining dimensions unchanged.

    source
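    For example (a sketch; the resulting size follows directly from the description above):

    using SparseIR

    A = rand(2, 3, 4)
    B = SparseIR.movedim(A, 1 => 3)   # size(B) == (3, 4, 2)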
    SparseIR.nsvalsMethod
    nsvals(hints)

    Upper bound for number of singular values.

    Upper bound on the number of singular values above the given threshold, i.e. where s[l] ≥ ε * first(s).

    source
    SparseIR.phase_stableMethod
    phase_stable(poly, wn)

    Phase factor for the piecewise Legendre to Matsubara transform.

    Compute the following phase factor in a stable way:

    exp.(iπ/2 * wn * cumsum(poly.Δx))
    source
    SparseIR.piecewiseMethod
    piecewise(rule, edges)

    Piecewise quadrature with the same quadrature rule, but scaled.

    source
    SparseIR.rescaleMethod
    rescale(basis::FiniteTempBasis, new_β)

    Return a basis for different temperature.

    Uses the same kernel with the same $ε$, but a different temperature. Note that this implies a different UV cutoff $ωmax$, since $Λ == β * ωmax$ stays constant.

    source
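    A sketch combining rescale with the Λ accessor documented at the end of this page:

    using SparseIR

    basis_f, _ = SparseIR.finite_temp_bases(10.0, 8.0, 1.0e-6)
    basis40    = SparseIR.rescale(basis_f, 40.0)
    SparseIR.Λ(basis40) == SparseIR.Λ(basis_f)   # Λ = β * ωmax is preserved, so ωmax shrinks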
    SparseIR.segments_xMethod
    segments_x(sve_hints::AbstractSVEHints[, T])

    Segments for piecewise polynomials on the $x$ axis.

    List of segments on the $x$ axis for the associated piecewise polynomial. Should reflect the approximate position of roots of a high-order singular function in $x$.

    source
    SparseIR.segments_yMethod
    segments_y(sve_hints::AbstractSVEHints[, T])

    Segments for piecewise polynomials on the $y$ axis.

    List of segments on the $y$ axis for the associated piecewise polynomial. Should reflect the approximate position of roots of a high-order singular function in $y$.

    source
    SparseIR.shift_xmidMethod
    shift_xmid(knots, Δx)

    Return midpoint relative to the nearest integer plus a shift.

    Return the midpoints xmid of the segments, as pair (diff, shift), where shift is in (0, 1, -1) and diff is a float such that xmid == shift + diff to floating point accuracy.

    source
    SparseIR.significanceFunction
    significance(basis::AbstractBasis)

    Return vector σ, where 0 ≤ σ[i] ≤ 1 is the significance level of the i-th basis function. If ϵ is the desired accuracy to which to represent a propagator, then any basis function where σ[i] < ϵ can be neglected.

    For the IR basis, we simply have that σ[i] = s[i] / first(s).

    source
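    For instance (a sketch reusing finite_temp_bases from above):

    using SparseIR

    basis_f, _ = SparseIR.finite_temp_bases(10.0, 8.0, 1.0e-6)
    σ = SparseIR.significance(basis_f)   # σ[i] = s[i] / first(s) for an IR basis
    keep = σ .≥ 1.0e-8                   # basis functions needed at accuracy 1e-8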
    SparseIR.splitMethod
    split(poly, x)

    Split segment.

    Find segment of poly's domain that covers x.

    source
    SparseIR.statisticsMethod
    statistics(basis::AbstractBasis)

    Quantum statistic (Statistics instance, Fermionic() or Bosonic()).

    source
    SparseIR.sve_hintsFunction
    sve_hints(kernel, ε)

    Provide discretisation hints for the SVE routines.

    Advises the SVE routines of discretisation parameters suitable for transforming the (infinite) SVE into a (finite) SVD problem.

    See also AbstractSVEHints.

    source
    SparseIR.to_IRFunction
    to_IR(dlr::DiscreteLehmannRepresentation, g_dlr::AbstractArray, dims=1)

    From DLR to IR. g_dlr: Expansion coefficients in DLR.

    source
    SparseIR.truncateMethod
    truncate(u, s, v; rtol=0.0, lmax=typemax(Int))

    Truncate singular value expansion.

    Arguments

    - `u`, `s`, `v`: Thin singular value expansion
    +)

    Perform truncated singular value expansion of a kernel.

    Perform a truncated singular value expansion (SVE) of an integral kernel kernel : [xmin, xmax] x [ymin, ymax] -> ℝ:

    kernel(x, y) == sum(s[l] * u[l](x) * v[l](y) for l in (1, 2, 3, ...)),

    where s[l] are the singular values, which are ordered in non-increasing fashion, u[l](x) are the left singular functions, which form an orthonormal system on [xmin, xmax], and v[l](y) are the right singular functions, which form an orthonormal system on [ymin, ymax].

    The SVE is mapped onto the singular value decomposition (SVD) of a matrix by expanding the kernel in piecewise Legendre polynomials (by default by using a collocation).

    Arguments

    • K::AbstractKernel: Integral kernel to take SVE from.

    • ε::Real: Accuracy target for the basis: attempt to have singular values down to a relative magnitude of ε, and have each singular value and singular vector be accurate to ε. A Twork with a machine epsilon of ε^2 or lower is required to satisfy this. Defaults to 2.2e-16 if xprec is available, and 1.5e-8 otherwise.

    • cutoff::Real: Relative cutoff for the singular values. A Twork with machine epsilon of cutoff is required to satisfy this. Defaults to a small multiple of the machine epsilon.

      Note that cutoff and ε serve distinct purposes. cutoff represents the accuracy to which the kernel is reproduced, whereas ε is the accuracy to which the singular values and vectors are guaranteed.

    • lmax::Integer: Maximum basis size. If given, only at most the lmax most significant singular values and associated singular functions are returned.

    • n_gauss::Integer: Order of Legendre polynomials. Defaults to the value hinted by the kernel.

    • Twork: Working data type. Defaults to a data type with machine epsilon of at most ε^2 and at most cutoff, or otherwise the most accurate data type available.

    • sve_strat::AbstractSVE: SVE to SVD translation strategy. Defaults to SamplingSVE, optionally wrapped inside of a CentrosymmSVE if the kernel is centrosymmetric.

    • svd_strat ('fast' or 'default' or 'accurate'): SVD solver. Defaults to a fast (ID/RRQR) based solution when accuracy goals are moderate, and to a more accurate Jacobi-based algorithm otherwise.

    Returns: An SVEResult containing the truncated singular value expansion.

    source
    SparseIR.SamplingSVEType
    SamplingSVE <: AbstractSVE

    SVE to SVD translation by sampling technique [1].

    Maps the singular value expansion (SVE) of a kernel kernel onto the singular value decomposition of a matrix A. This is achieved by choosing two sets of Gauss quadrature rules: (x, wx) and (y, wy) and approximating the integrals in the SVE equations by finite sums. This implies that the singular values of the SVE are well-approximated by the singular values of the following matrix:

    A[i, j] = √(wx[i]) * K(x[i], y[j]) * √(wy[j])

    and the values of the singular functions at the Gauss sampling points can be reconstructed from the singular vectors u and v as follows:

    u[l,i] ≈ √(wx[i]) u[l](x[i])
    +v[l,j] ≈ √(wy[j]) v[l](y[j])

    [1] P. Hansen, Discrete Inverse Problems, Ch. 3.1

    source
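    A sketch of this discretization; the Gauss nodes and weights x, wx, y, wy and the kernel K are assumed inputs here:

    using LinearAlgebra

    # Build A[i, j] = √(wx[i]) * K(x[i], y[j]) * √(wy[j]); its singular values
    # approximate those of the SVE, e.g. via svd(discretize(K, x, wx, y, wy)).
    discretize(K, x, wx, y, wy) =
        [sqrt(wx[i]) * K(x[i], y[j]) * sqrt(wy[j]) for i in eachindex(x), j in eachindex(y)]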
    SparseIR.accuracyFunction
    accuracy(basis::AbstractBasis)

    Accuracy of the basis.

    Upper bound on the relative error of representing a propagator with the given number of basis functions (a number between 0 and 1).

    source
    SparseIR.canonicalize!Method
    canonicalize!(u, v)

    Canonicalize basis.

    Each SVD (u[l], v[l]) pair is unique only up to a global phase, which may differ from implementation to implementation and from platform to platform. We fix that gauge by demanding u[l](1) > 0. This ensures a diffeomorphic connection to the Legendre polynomials as Λ → 0.

    source
    SparseIR.conv_radiusFunction
    conv_radius(kernel)

    Convergence radius of the Matsubara basis asymptotic model.

    For improved relative numerical accuracy, the IR basis functions on the Matsubara axis uhat(basis, n) can be evaluated from an asymptotic expression for abs(n) > conv_radius. If isinf(conv_radius), then the asymptotics are unused (the default).

    source
    SparseIR.default_matsubara_sampling_pointsFunction
    default_matsubara_sampling_points(basis::AbstractBasis; positive_only=false)

    Default sampling points on the imaginary frequency axis.

    Arguments

    • positive_only::Bool: Only return non-negative frequencies. This is useful if the object to be fitted is symmetric in Matsubara frequency, ĝ(ω) == conj(ĝ(-ω)), or, equivalently, real in imaginary time.
    source
    SparseIR.eval_matrixFunction
    eval_matrix(T, basis, x)

    Return evaluation matrix from coefficients to sampling points. T <: AbstractSampling.

    source
    SparseIR.find_extremaMethod
    find_extrema(polyFT::PiecewiseLegendreFT; part=nothing, grid=DEFAULT_GRID)

    Obtain extrema of Fourier-transformed polynomial.

    source
    SparseIR.finite_temp_basesFunction
    finite_temp_bases(β::Real, ωmax::Real, ε=nothing;
    +                  kernel=LogisticKernel(β * ωmax), sve_result=SVEResult(kernel; ε))

    Construct FiniteTempBasis objects for fermion and bosons using the same LogisticKernel instance.

    source
    SparseIR.from_IRFunction
    from_IR(dlr::DiscreteLehmannRepresentation, gl::AbstractArray, dims=1)

    From IR to DLR. gl: Expansion coefficients in IR.

    source
    SparseIR.get_symmetrizedMethod
    get_symmetrized(kernel, sign)

    Construct a symmetrized version of kernel, i.e. kernel(x, y) + sign * kernel(x, -y).

    Beware!

    By default, this returns a simple wrapper over the current instance which naively performs the sum. You may want to override this to avoid cancellation.

    source
    SparseIR.get_tnlMethod
    get_tnl(l, w)

    Fourier integral of the l-th Legendre polynomial:

    Tₗ(ω) == ∫ dx exp(iωx) Pₗ(x)
    source
    SparseIR.giwMethod
    giw(polyFT, wn)

    Return model Green's function for reduced frequencies

    source
    SparseIR.iscentrosymmetricFunction
    iscentrosymmetric(kernel)

    Return true if kernel(x, y) == kernel(-x, -y) for all values of x and y in range. This allows the kernel to be block-diagonalized, speeding up the singular value expansion by a factor of 4. Defaults to false.

    source
    SparseIR.matop!Method
    matop!(buffer, mat, arr::AbstractArray, op, dim)

    Apply the operator op to the matrix mat and to the array arr along the first dimension (dim=1) or the last dimension (dim=N).

    source
    SparseIR.matop_along_dim!Method
    matop_along_dim!(buffer, mat, arr::AbstractArray, dim::Integer, op)

    Apply the operator op to the matrix mat and to the array arr along the dimension dim, writing the result to buffer.

    source
    SparseIR.movedimMethod
    movedim(arr::AbstractArray, src => dst)

    Move arr's dimension at src to dst while keeping the order of the remaining dimensions unchanged.

    source
    SparseIR.nsvalsMethod
    nsvals(hints)

    Upper bound for number of singular values.

    Upper bound on the number of singular values above the given threshold, i.e. where s[l] ≥ ε * first(s).

    source
    SparseIR.phase_stableMethod
    phase_stable(poly, wn)

    Phase factor for the piecewise Legendre to Matsubara transform.

    Compute the following phase factor in a stable way:

    exp.(iπ/2 * wn * cumsum(poly.Δx))
    source
    SparseIR.piecewiseMethod
    piecewise(rule, edges)

    Piecewise quadrature with the same quadrature rule, but scaled.

    source
    SparseIR.rescaleMethod
    rescale(basis::FiniteTempBasis, new_β)

    Return a basis for different temperature.

    Uses the same kernel with the same $ε$, but a different temperature. Note that this implies a different UV cutoff $ωmax$, since $Λ == β * ωmax$ stays constant.

    source
    SparseIR.segments_xMethod
    segments_x(sve_hints::AbstractSVEHints[, T])

    Segments for piecewise polynomials on the $x$ axis.

    List of segments on the $x$ axis for the associated piecewise polynomial. Should reflect the approximate position of roots of a high-order singular function in $x$.

    source
    SparseIR.segments_yMethod
    segments_y(sve_hints::AbstractSVEHints[, T])

    Segments for piecewise polynomials on the $y$ axis.

    List of segments on the $y$ axis for the associated piecewise polynomial. Should reflect the approximate position of roots of a high-order singular function in $y$.

    source
    SparseIR.shift_xmidMethod
    shift_xmid(knots, Δx)

    Return midpoint relative to the nearest integer plus a shift.

    Return the midpoints xmid of the segments, as pair (diff, shift), where shift is in (0, 1, -1) and diff is a float such that xmid == shift + diff to floating point accuracy.

    source
    SparseIR.significanceFunction
    significance(basis::AbstractBasis)

    Return vector σ, where 0 ≤ σ[i] ≤ 1 is the significance level of the i-th basis function. If ϵ is the desired accuracy to which to represent a propagator, then any basis function where σ[i] < ϵ can be neglected.

    For the IR basis, we simply have that σ[i] = s[i] / first(s).

    source
    SparseIR.splitMethod
    split(poly, x)

    Split segment.

    Find segment of poly's domain that covers x.

    source
    SparseIR.statisticsMethod
    statistics(basis::AbstractBasis)

    Quantum statistic (Statistics instance, Fermionic() or Bosonic()).

    source
    SparseIR.sve_hintsFunction
    sve_hints(kernel, ε)

    Provide discretisation hints for the SVE routines.

    Advises the SVE routines of discretisation parameters suitable for transforming the (infinite) SVE into a (finite) SVD problem.

    See also AbstractSVEHints.

    source
    SparseIR.to_IRFunction
    to_IR(dlr::DiscreteLehmannRepresentation, g_dlr::AbstractArray, dims=1)

    From DLR to IR. g_dlr: Expansion coefficients in DLR.

    source
    SparseIR.truncateMethod
    truncate(u, s, v; rtol=0.0, lmax=typemax(Int))

    Truncate singular value expansion.

    Arguments

    - `u`, `s`, `v`: Thin singular value expansion
    - `rtol`: Only singular values satisfying `s[l]/s[1] > rtol` are retained.
    - `lmax`: At most the `lmax` most significant singular values are retained.
    source
    SparseIR.weight_funcFunction
    weight_func(kernel, statistics::Statistics)

    Return the weight function for the given statistics.

    • Fermion: w(x) == 1
    • Boson: w(y) == 1/tanh(Λ*y/2)
    source
    SparseIR.xrangeFunction
    xrange(kernel)

    Return a tuple $(x_\mathrm{min}, x_\mathrm{max})$ delimiting the range of allowed x values.

    source
    SparseIR.yrangeFunction
    yrange(kernel)

    Return a tuple $(y_\mathrm{min}, y_\mathrm{max})$ delimiting the range of allowed y values.

    source
    SparseIR.ΛFunction
    Λ(basis::AbstractBasis)
    lambda(basis::AbstractBasis)

    Basis cutoff parameter, Λ = β * ωmax, or nothing if not present.

    source
    SparseIR.βMethod
    β(basis::AbstractBasis)
    beta(basis::AbstractBasis)

    Inverse temperature or nothing if unscaled basis.

    source
    SparseIR.ωmaxFunction
    ωmax(basis::AbstractBasis)
    wmax(basis::AbstractBasis)

    Real frequency cutoff or nothing if unscaled basis.

    source
    SparseIR._LinAlg.rrqr!Method

    Truncated rank-revealing QR decomposition with full column pivoting.

    Decomposes an (m, n) matrix A into the product:

    A[:,piv] == Q * R

    where Q is an (m, k) isometric matrix, R is a (k, n) upper triangular matrix, piv is a permutation vector, and k is chosen such that the relative tolerance tol is met in the equality above.

    source
    SparseIR._LinAlg.svd2x2Method

    Perform the SVD of an arbitrary two-by-two matrix:

      [ a11  a12 ]  =  [  cu  -su ] [ smax     0 ] [  cv   sv ]
      [ a21  a22 ]     [  su   cu ] [    0  smin ] [ -sv   cv ]

    Note that smax and smin can be negative.

    source
    SparseIR._LinAlg.svd2x2Method

    Perform the SVD of upper triangular two-by-two matrix:

      [ f    g   ]  =  [  cu  -su ] [ smax     0 ] [  cv   sv ]
      [ 0    h   ]     [  su   cu ] [    0  smin ] [ -sv   cv ]

    Note that smax and smin can be negative.

    source
    SparseIR._LinAlg.tsvd!Method

    Truncated singular value decomposition.

    Decomposes an (m, n) matrix A into the product:

    A == U * (s .* VT)

    where U is an (m, k) matrix with orthogonal columns, VT is a (k, n) matrix with orthogonal rows and s are the singular values, a set of k nonnegative numbers in non-ascending order. The SVD is truncated in the sense that singular values below tol are discarded.

    source
    Public · SparseIR.jl

    Public names index

    SparseIR.AugmentedBasisType
    AugmentedBasis <: AbstractBasis

    Augmented basis on the imaginary-time/frequency axis.

    Groups a set of additional functions, augmentations, with a given basis. The augmented functions then form the first basis functions, while the rest is provided by the regular basis, i.e.:

    u[l](x) == l < naug ? augmentations[l](x) : basis.u[l-naug](x),

    where naug = length(augmentations) is the number of added basis functions through augmentation. Similar expressions hold for Matsubara frequencies.

    Augmentation is useful in constructing bases for vertex-like quantities such as self-energies [wallerberger2021] and when constructing a two-point kernel that serves as a base for multi-point functions [shinaoka2018].

    Warning

    Bases augmented with TauConst and TauLinear tend to be poorly conditioned. Care must be taken while fitting and compactness should be enforced if possible to regularize the problem.

    While vertex bases, i.e. bases augmented with MatsubaraConst, stay reasonably well-conditioned, it is still good practice to treat the Hartree–Fock term separately rather than including it in the basis, if possible.

    See also: MatsubaraConst for vertex basis [wallerberger2021], TauConst, TauLinear for multi-point [shinaoka2018]
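
    A hedged construction sketch (the form AugmentedBasis(basis, augmentations...) is an assumption; the basis parameters are arbitrary):

    using SparseIR

    basis_b = FiniteTempBasis{Bosonic}(10.0, 4.0, 1e-6)
    aug = AugmentedBasis(basis_b, TauConst, TauLinear)   # prepend a constant and a linear term in τ
    length(aug) == length(basis_b) + 2                   # two augmentation functions were added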

    source
    SparseIR.DiscreteLehmannRepresentationType
    DiscreteLehmannRepresentation <: AbstractBasis

    Discrete Lehmann representation (DLR) with poles selected according to extrema of IR.

    This type implements a variant of the discrete Lehmann representation (DLR) [1]. Instead of a truncated singular value expansion of the analytic continuation kernel $K$ like the IR, the discrete Lehmann representation is based on a "sketching" of $K$. The resulting basis is a linear combination of a discrete set of poles on the real-frequency axis, continued to the imaginary-frequency axis:

    G(iv) == sum(a[i] / (iv - w[i]) for i in 1:L)

    Warning

    The poles on the real-frequency axis selected for the original DLR are based on a rank-revealing decomposition, which offers accuracy guarantees. Here, we instead select the pole locations based on the zeros of the IR basis functions on the real axis, which is a heuristic. We do not expect that difference to matter, but please don't blame the DLR authors if we were wrong :-)

    source
    SparseIR.FiniteTempBasisType
    FiniteTempBasis <: AbstractBasis

    Intermediate representation (IR) basis for given temperature.

    For a continuation kernel K from real frequencies, ω ∈ [-ωmax, ωmax], to imaginary time, τ ∈ [0, β], this type stores the truncated singular value expansion or IR basis:

    K(τ, ω) ≈ sum(u[l](τ) * s[l] * v[l](ω) for l in 1:L)

    This basis is inferred from a reduced form by appropriate scaling of the variables.

    Fields

    • u::PiecewiseLegendrePolyVector: Set of IR basis functions on the imaginary time (tau) axis. These functions are stored as piecewise Legendre polynomials.

      To obtain the value of all basis functions at a point or an array of points x, you can call the function u(x). To obtain a single basis function, a slice or a subset l, you can use u[l].

    • uhat::PiecewiseLegendreFT: Set of IR basis functions on the Matsubara frequency (wn) axis. These objects are stored as a set of Bessel functions.

      To obtain the value of all basis functions at a Matsubara frequency or an array of points wn, you can call the function uhat(wn). Note that we expect reduced frequencies, which are simply even/odd numbers for bosonic/fermionic objects. To obtain a single basis function, a slice or a subset l, you can use uhat[l].

    • s: Vector of singular values of the continuation kernel

    • v::PiecewiseLegendrePoly: Set of IR basis functions on the real frequency (w) axis. These functions are stored as piecewise Legendre polynomials.

      To obtain the value of all basis functions at a point or an array of points w, you can call the function v(w). To obtain a single basis function, a slice or a subset l, you can use v[l].

    source
    SparseIR.FiniteTempBasisMethod
    FiniteTempBasis{S}(β, ωmax, ε=nothing; max_size=nothing, args...)

    Construct a finite temperature basis suitable for the given S (Fermionic or Bosonic) and cutoffs β and ωmax.
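
    For example (the parameter values are arbitrary; see the field descriptions above for u, uhat and s):

    using SparseIR

    β, ωmax, ε = 10.0, 4.0, 1e-7
    basis = FiniteTempBasis{Fermionic}(β, ωmax, ε)
    basis.u(0.5)    # all τ-basis functions evaluated at τ = 0.5
    basis.uhat(5)   # all Matsubara basis functions at reduced frequency n = 5 (odd ⇒ fermionic)
    basis.s         # singular values of the kernel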

    source
    SparseIR.FiniteTempBasisSetType
    FiniteTempBasisSet

    Type for holding IR bases and sparse-sampling objects.

    An object of this type holds IR bases for fermions and bosons and associated sparse-sampling objects.

    Fields

    • basis_f::FiniteTempBasis: Fermion basis
    • basis_b::FiniteTempBasis: Boson basis
    • tau::Vector{Float64}: Sampling points in the imaginary-time domain
    • wn_f::Vector{Int}: Sampling fermionic frequencies
    • wn_b::Vector{Int}: Sampling bosonic frequencies
    • smpltauf::TauSampling: Sparse sampling for tau & fermion
    • smpltaub::TauSampling: Sparse sampling for tau & boson
    • smplwnf::MatsubaraSampling: Sparse sampling for Matsubara frequency & fermion
    • smplwnb::MatsubaraSampling: Sparse sampling for Matsubara frequency & boson
    • sve_result::Tuple{PiecewiseLegendrePoly,Vector{Float64},PiecewiseLegendrePoly}: Results of SVE

    Getters

    • beta::Float64: Inverse temperature
    • ωmax::Float64: Cut-off frequency
    source
    SparseIR.LogisticKernelType
    LogisticKernel <: AbstractKernel

    Fermionic/bosonic analytical continuation kernel.

    In dimensionless variables $x = 2 τ/β - 1$, $y = β ω/Λ$, the integral kernel is a function on $[-1, 1] × [-1, 1]$:

    \[ K(x, y) = \frac{e^{-Λ y (x + 1) / 2}}{1 + e^{-Λ y}}\]

    LogisticKernel is a fermionic analytic continuation kernel. Nevertheless, one can model the $τ$ dependence of a bosonic correlation function as follows:

    \[ ∫ \frac{e^{-Λ y (x + 1) / 2}}{1 - e^{-Λ y}} ρ(y) dy = ∫ K(x, y) ρ'(y) dy,\]

    with

    \[ ρ'(y) = w(y) ρ(y),\]

    where the weight function is given by

    \[ w(y) = \frac{1}{\tanh(Λ y/2)}.\]
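
    A hedged sketch of the bosonic weighting described above (assumes LogisticKernel takes Λ as its only constructor argument and that weight_func returns a callable):

    using SparseIR

    K = LogisticKernel(42.0)                 # Λ = 42
    w = SparseIR.weight_func(K, Bosonic())   # w(y) = 1/tanh(Λ*y/2)
    w(0.1)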

    source
    SparseIR.MatsubaraFreqType
    MatsubaraFreq(n)

    Prefactor n of the Matsubara frequency ω = n*π/β

    Struct representing the Matsubara frequency ω entering the Fourier transform of a propagator G(τ) on imaginary time τ to its Matsubara equivalent Ĝ(iω) on the imaginary-frequency axis:

            β
    Ĝ(iω) = ∫  dτ exp(iωτ) G(τ)      with    ω = n π/β,
            0

    where β is the inverse temperature and, by convention, we include the imaginary unit in the frequency argument, i.e., Ĝ(iω). The frequencies depend on the statistics of the propagator, i.e., we have that:

    G(τ - β) = ± G(τ)

    where + is for bosons and - is for fermions. The frequencies are restricted accordingly.

    • Bosonic frequency (S == Bosonic): n even (periodic in β)
    • Fermionic frequency (S == Fermionic): n odd (anti-periodic in β)
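
    For example:

    using SparseIR

    ν = MatsubaraFreq(5)   # odd n ⇒ fermionic frequency, ω = 5π/β
    ω = MatsubaraFreq(6)   # even n ⇒ bosonic frequency,  ω = 6π/β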
    source
    SparseIR.MatsubaraSamplingType
    MatsubaraSampling <: AbstractSampling

    Sparse sampling in Matsubara frequencies.

    Allows the transformation between the IR basis and a set of sampling points in (scaled/unscaled) imaginary frequencies.

    source
    SparseIR.MatsubaraSamplingMethod
    MatsubaraSampling(basis; positive_only=false,
                      sampling_points=default_matsubara_sampling_points(basis; positive_only))

    Construct a MatsubaraSampling object. If not given, the sampling_points are chosen as the (discrete) extrema of the highest-order basis function in Matsubara. This turns out to be close to optimal with respect to conditioning for this size (within a few percent).

    By setting positive_only=true, one assumes that functions to be fitted are symmetric in Matsubara frequency, i.e.:

    \[ Ĝ(iν) = conj(Ĝ(-iν))\]

    or equivalently, that they are purely real in imaginary time. In this case, sparse sampling is performed over non-negative frequencies only, cutting away half of the necessary sampling space.
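
    A hedged round-trip sketch (gl is a hypothetical vector of IR coefficients; length(basis) is assumed to give the basis size):

    using SparseIR

    basis = FiniteTempBasis{Fermionic}(10.0, 4.0, 1e-6)
    smpl  = MatsubaraSampling(basis; positive_only=true)
    gl    = rand(length(basis))    # hypothetical (real) IR coefficients ⇒ symmetric in iν
    Ĝ_iν  = evaluate(smpl, gl)     # values at the non-negative sampling frequencies
    gl′   = fit(smpl, Ĝ_iν)        # recover the coefficients; gl′ ≈ gl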

    source
    SparseIR.RegularizedBoseKernelType
    RegularizedBoseKernel <: AbstractKernel

    Regularized bosonic analytical continuation kernel.

    In dimensionless variables $x = 2 τ/β - 1$, $y = β ω/Λ$, the integral kernel is a function on $[-1, 1] × [-1, 1]$:

    \[ K(x, y) = y \frac{e^{-Λ y (x + 1) / 2}}{e^{-Λ y} - 1}\]

    Care has to be taken in evaluating this expression around $y = 0$.

    source
    SparseIR.TauConstType
    TauConst <: AbstractAugmentation

    Constant in imaginary time/discrete delta in frequency.

    source
    SparseIR.TauLinearType
    TauLinear <: AbstractAugmentation

    Linear function in imaginary time, antisymmetric around β/2.

    source
    SparseIR.TauSamplingType
    TauSampling <: AbstractSampling

    Sparse sampling in imaginary time.

    Allows the transformation between the IR basis and a set of sampling points in (scaled/unscaled) imaginary time.

    source
    SparseIR.TauSamplingMethod
    TauSampling(basis[; sampling_points])

    Construct a TauSampling object. If not given, the sampling_points are chosen as the extrema of the highest-order basis function in imaginary time. This turns out to be close to optimal with respect to conditioning for this size (within a few percent).
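
    A minimal round-trip sketch (gl is a hypothetical coefficient vector; length(basis) is assumed to give the basis size):

    using SparseIR

    basis  = FiniteTempBasis{Fermionic}(10.0, 4.0, 1e-6)
    smpl_τ = TauSampling(basis)
    gl  = rand(length(basis))     # hypothetical IR expansion coefficients
    gτ  = evaluate(smpl_τ, gl)    # values at the sparse τ sampling points
    gl′ = fit(smpl_τ, gτ)         # fit back; gl′ ≈ gl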

    source
    SparseIR.evaluate!Method
    evaluate!(buffer::AbstractArray{T,N}, sampling, al; dim=1) where {T,N}

    Like evaluate, but write the result to buffer. Please use dim = 1 or N to avoid allocating large temporary arrays internally.

    source
    SparseIR.evaluateMethod
    evaluate(sampling, al; dim=1)

    Evaluate the basis coefficients al at the sparse sampling points.

    source
    SparseIR.fit!Method
    fit!(buffer::Array{S,N}, smpl::AbstractSampling, al::Array{T,N}; 
         dim=1, workarr::Vector{S}) where {S,T,N}

    Like fit, but write the result to buffer. Use dim = 1 or dim = N to avoid allocating large temporary arrays internally. The length of workarr cannot be smaller than SparseIR.workarrlength(smpl, al).
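
    A hedged allocation-free sketch (the assumption that buffer has the same shape as the coefficient array, i.e. length(basis), is ours):

    using SparseIR

    basis = FiniteTempBasis{Fermionic}(10.0, 4.0, 1e-6)
    smpl  = TauSampling(basis)
    gτ    = evaluate(smpl, rand(length(basis)))                        # hypothetical sampled data
    buffer  = Vector{Float64}(undef, length(basis))                    # assumed output shape
    workarr = Vector{Float64}(undef, SparseIR.workarrlength(smpl, gτ))
    fit!(buffer, smpl, gτ; dim=1, workarr=workarr)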

    source
    SparseIR.fitMethod
    fit(sampling, al::AbstractArray{T,N}; dim=1)

    Fit basis coefficients from the sparse sampling points. Please use dim = 1 or N to avoid allocating large temporary arrays internally.

    source
    SparseIR.overlapMethod
    overlap(poly::PiecewiseLegendrePoly, f; 
        rtol=eps(T), return_error=false, maxevals=10^4, points=T[])

    Evaluate overlap integral of poly with arbitrary function f.

    Given the function f, evaluate the integral

    ∫ dx f(x) poly(x)

    using adaptive Gauss-Legendre quadrature.

    points is a sequence of break points in the integration interval where local difficulties of the integrand may occur (e.g. singularities, discontinuities).
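
    A short sketch (the integrand exp(-τ) is an arbitrary stand-in for a user-supplied function):

    using SparseIR

    basis = FiniteTempBasis{Fermionic}(10.0, 4.0, 1e-6)
    u1 = basis.u[1]
    overlap(u1, τ -> exp(-τ))   # ∫ dτ exp(-τ) * u1(τ) over u1's domain (here [0, β])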

    source