From 4622cbd6e1ef2ef56aed2fa0d2ae9b4546bbee47 Mon Sep 17 00:00:00 2001 From: Qiming Sun Date: Fri, 12 Feb 2016 00:01:02 -0500 Subject: [PATCH 1/2] Add documents and exampels for: DMRG-NEVPT2, DMRG-CASSCF via PySCF --- CHANGELOG.md | 27 ++++ docs/source/CHANGELOG.rst | 1 + docs/source/benchmark.rst | 33 ++++ docs/source/build.rst | 51 ++++++ docs/source/examples-with-pyscf.rst | 232 ++++++++++++++++++++++++++++ docs/source/examples.rst | 8 +- docs/source/index.rst | 3 + docs/source/overview.rst | 4 +- 8 files changed, 354 insertions(+), 5 deletions(-) create mode 100644 CHANGELOG.md create mode 120000 docs/source/CHANGELOG.rst create mode 100644 docs/source/benchmark.rst create mode 100644 docs/source/examples-with-pyscf.rst diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..34e23af7 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,27 @@ +Change Log +********** + +## [1.1 alpha] - 2016-02-12 + +* New NEVPT2 implementation based on MPS perturber + + +## [1.0.1] - 2015-07-19 + +* fixed mpi runtime error; processors now write intermediates files into different directories under scratch; +* now printing summed cpu timings from all processors; +* fixed error of orbital reordering in restart_npdm; +* removed bugs in PT calculations; +* test reference data now cleaned and consistent. + + +## [1.0.0] - 2015-03-27 + +* warm-up step faster +* npdm up to fourth-order as well as transition pdm up to second-order + + +## [0.9.7] - 2013-02-17 + + +## [0.9] - 2012-09-21 diff --git a/docs/source/CHANGELOG.rst b/docs/source/CHANGELOG.rst new file mode 120000 index 00000000..699cc9e7 --- /dev/null +++ b/docs/source/CHANGELOG.rst @@ -0,0 +1 @@ +../../CHANGELOG.md \ No newline at end of file diff --git a/docs/source/benchmark.rst b/docs/source/benchmark.rst new file mode 100644 index 00000000..cc7945d0 --- /dev/null +++ b/docs/source/benchmark.rst @@ -0,0 +1,33 @@ +.. 
_benchmark: + + +Benchmark +********* + +========= ============================== +Platform +========= ============================== +CPU 4 Intel E5-2670 @ 2.6 GHz +Memory 128 GB DDR3 +OS Custom Redhat 6.6 +BLAS MKL 11.0 +Compiler GCC 4.8.2 +========= ============================== + + +Computation cost +================ + +================================= ============= ======================== ============= + Problem size Program CPU Memory +================================= ============= ======================== ============= + 439 AOs, CAS(16e,20o), M = 1000 DMRG-CASCI ~ 1 h (16 core) < 1 GB/core + \ DMRG-CASSCF ~ 1.5 h/iter (16 core) < 1 GB/core + \ NEVPT2 48 h (8 core) ~12 GB/core + \ MPS-NEVPT2 5.5 h (16 core) < 4 GB/core + 439 AOs, CAS(22e,27o), M = 1000 DMRG-CASCI 6 h (16 core) < 2 GB/core + \ DMRG-CASSCF 9 h/iter (16 core) < 2 GB/core + \ MPS-NEVPT2 29 h (16 core) ~10 GB/core + 760 AOs, CAS(30e,36o), M = 1500 DMRG-CASCI 24 h (16 core) < 2 GB/core +================================= ============= ======================== ============= + diff --git a/docs/source/build.rst b/docs/source/build.rst index ce0549ec..0e28e3e0 100644 --- a/docs/source/build.rst +++ b/docs/source/build.rst @@ -47,6 +47,56 @@ When the makefile is configured, run in the directory ``./Block``:: The successful compilation generates the executable ``block.spin_adapted``, static and shared DMRG libraries ``libqcdmrg.a`` and ``libqcdmrg.so``. + +.. _pyscf-itrf: + +Interface to PySCF package +-------------------------- + +The electronic structure Python module `PySCF `_ +provides an interface to run `BLOCK` code. 
If you would like to run +DMRG-SCF, DMRG-NEVPT2 etc with PySCF package, you need to create a pyscf +config file ``/path/to/pyscf/future/dmrgscf/settings.py`` and add the +following settings in it:: + + BLOCKEXE = "/path/to/Block/block.spin_adapted" + BLOCKSCRATCHDIR = "/path/to/scratch" + MPIPREFIX = "mpirun" + +Note the parameter ``MPIPREFIX`` should be adjusted according to your +job scheduler, eg:: + + # For OpenPBS/Torque + MPIPREFIX = "mpirun" + # For SLURM + MPIPREFIX = "srun" + +If the calculation is carried out on an interactive node, eg with 4 processors, +the setting looks like:: + + MPIPREFIX = "mpirun -n 4" + +If ``BLOCK`` and ``PySCF`` are installed successfully, a simple DMRG-SCF +calculation can be input in Python interpreter:: + + >>> from pyscf import gto, scf, dmrgscf + >>> mf = gto.M(atom='C 0 0 0; C 0 0 1', basis='ccpvdz').apply(scf.RHF).run() + >>> mc = dmrgscf.dmrgci.DMRGSCF(mf, 6, 6) + >>> mc.run() + +DMRG-NEVPT2 calculation can be applied:: + + >>> from pyscf import mrpt + >>> mrpt.nevpt2.sc_nevpt(mc) + +Optionally, if `MPI4Py `_ was installed, the efficient +DMRG-NEVPT2 implementation can be used, eg:: + + >>> from pyscf import mrpt + >>> dmrgscf.dmrgci.DMRG_MPS_NEVPT(mc) + >>> mrpt.nevpt2.sc_nevpt(mc, useMPS=True) + + How to run `BLOCK` ================== @@ -70,3 +120,4 @@ Testjobs The tests require Python to be installed on the system. + diff --git a/docs/source/examples-with-pyscf.rst b/docs/source/examples-with-pyscf.rst new file mode 100644 index 00000000..05c8be80 --- /dev/null +++ b/docs/source/examples-with-pyscf.rst @@ -0,0 +1,232 @@ +BLOCK for Electronic Structure Calculations +******************************************* + +Using PySCF +=========== + +`PySCF `_ package provides +an interface to call ``BLOCK`` code for DMRG-CASSCF and DMRG-NEVPT2 calculations. +See the :ref:`installation ` section to set up the PySCF/BLOCK interface. 
+In this section, we will demonstrate how to use ``BLOCK`` and ``PySCF`` packages +to study static and dynamic correlations with DMRG-CASSCF/DMRG-CASCI and +DMRG-MRPT solvers for large active space problems. + +DMRG-CASSCF +----------- + +We start from a simple example:: + + $ cat example1.py + from pyscf import gto, scf, dmrgscf + mf = gto.M(atom="C 0 0 0; C 0 0 1", basis="ccpvdz").apply(scf.RHF).run() + mc = dmrgscf.dmrgci.DMRGSCF(mf, 6, 6) + mc.run() + +Executing this script in command line:: + + $ python example1.py + +will start BLOCK program with 4 processors, assuming that you have the +configuration ``dmrgscf.settings.MPIPREFIX = "mpirun -n 4"`` + +.. note:: Don't put the prefix ``mpirun -n`` in the command line to start python. + +The number of processors can be dynamically adjusted using ``sys.argv`` +parameter, eg:: + + $ cat example2.py + import sys + from pyscf import gto, scf, dmrgscf + dmrgscf.dmrgci.settings.MPIPREFIX = "mpirun -n %s" % sys.argv[0] + mf = gto.M(atom="C 0 0 0; C 0 0 1", basis="ccpvdz").apply(scf.RHF).run() + mc = dmrgscf.dmrgci.DMRGSCF(mf, 6, 6) + mc.run() + + $ python example1.py 4 + +In the above examples, ``gto``, ``scf`` are the standard modules provided by +PySCF package. For the use of PySCF package, we refer the reader to the +`PySCF documentation `_. ``dmrgscf`` module is the code +where we put Block interface. It is designed to control all Block input +parameters, access the results from Block, including but not limited to +regular DMRG calculation, N-particle density matrices (up to 4-PDM) and +transition density matrices (up to 2-PDM), DMRG-NEVPT2 calculations. 
+ +The standard way to start a DMRG-CASSCF calculation needs to modify the +``fcisolver`` attribute of CASSCF or CASCI object:: + + from pyscf import gto, scf, mcscf, dmrgscf + mol = gto.M(atom="C 0 0 0; C 0 0 1", basis="ccpvdz") + mf = scf.RHF(mol) + mf.run() + norb = 6 + nelec = 6 + mc = mcscf.CASSCF(mf, norb, nelec) + dmrgsolver = dmrgscf.dmrgci.DMRGCI(mol) + dmrgsolver.maxM = 50 + dmrgsolver.maxIter = 10 + mc.fcisolver = dmrgsolver + mc.run() + + mc = mcscf.CASCI(mf, norb, nelec) + mc.fcisolver = dmrgscf.dmrgci.DMRGCI(mol) + mc.run() + +``dmrgsolver = dmrgscf.dmrgci.DMRGCI(mol)`` created an object ``dmrgsolver`` to +hold Block input parameters and runtime environments. By default, +``maxM=1000`` is applied. One can control the DMRG calculation by changing +the settings of ``dmrgsolver`` object, eg to set the sweep schedule:: + + dmrgsolver.scheduleSweeps = [0, 4, 8, 12, 16, 20, 24, 28, 30, 34] + dmrgsolver.scheduleMaxMs = [200, 400, 800, 1200, 2000, 4000, 3000, 2000, 1000, 500] + dmrgsolver.scheduleTols = [0.0001, 0.0001, 0.0001, 0.0001, 1e-5, 1e-6, 1e-7, 1e-7, 1e-7, 1e-7] + dmrgsolver.scheduleNoises = [0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0, 0.0, 0.0, 0.0] + dmrgsolver.twodot_to_onedot = 38 + +For more details of the default settings and control parameters, we refer to +the `source code `_ +and the corresponding Block code `keywords list `_. + +To make the embedded DMRG solver work more efficiently in CASSCF iteration, one +need carefully tune the DMRG runtime parameters. It means more input arguments +and options to be specified for ``dmrgsolver`` object. To simplify the input, +we provided a shortcut function ``DMRGSCF`` in the ``dmrgscf`` module, as shown +by the first example ``example1.py``. In the ``DMRGSCF`` function, we created +a CASSCF object, and changed its ``fcisolver`` to ``dmrgsolver``, then hook a +function which is used to dynamically adjust sweep scheduler to CASSCF object. 
+Now, the DMRG-CASSCF method can be applied in one line of input:: + + mc = dmrgscf.dmrgci.DMRGSCF(mf, norb, nelec).run() + +The DMRG-CASSCF results such as orbital coefficients, natural occupancy etc. +will be held in the ``mc`` object. The DMRG wave function will be stored on +disk, more precisely, in the directory specified by ``mc.fcisolver.scratchDirectory``. +Apparently, we can modify its value to change the place to save the DMRG +wave function. The default directory is read from the dmrgscf configuration +parameter ``dmrgscf.settings.BLOCKSCRATCHDIR``. + +.. note:: Be sure the mc.fcisolver.scratchDirectory is properly assigned. + Since all DMRGCI object by default uses the same BLOCKSCRATCHDIR settings, + it's easy to cause name conflicts on the scratch directory, especially when + two DMRG-CASSCF calculations are executed on the same node. + +.. note:: Usually, the DMRG wave function is very large. Be sure that the + disk which BLOCKSCRATCHDIR pointed to has enough space. + +Due to the complexity of multi-configuration model, it's common that we need +interrupted the CASSCF calculation and restart a new one with modified +parameters. To restart the CASSCF calculation, we need the information such as +orbital coefficients and active space CI wave function of last simulation. +Although the orbital coefficients can be save/load through PySCF `chkfile` +module (see `PySCF documentation `_), the CI wave +function are not saved by PySCF. Unlike the regular Full CI based CASSCF +calculation in which the Full CI wave function can be fast rebuilt by a fresh +running, the restart feature of DMRG-CASSCF calculation relies on the wave +function indicated by the ``mc.fcisolver.scratchDirectory`` attribute and the +``restart`` flag of DMRG solver:: + + mc = dmrgscf.dmrgci.DMRGSCF(mol) + mc.fcisolver.scratchDirectory = "/path/to/last/dmrg/scratch" + mc.fcisolver.restart = True + mc.run() + +.. 
note:: Assigne the direct Block wave function directory to + mc.fcisolver.scratchDirectory. An incorrect DMRG wave function may cause + DMRG-CASSCF crash. + +Other common features like state-average DMRG-CASSCF or state-specific for +excited state can be easily called with the ``DMRGSCF`` wrapper function:: + + from pyscf import gto, scf, mcscf, dmrgscf + mol = gto.M(atom="C 0 0 0; C 0 0 1", basis="ccpvdz") + mf = scf.RHF(mol) + mf.run() + mc = dmrgscf.dmrgci.DMRGSCF(mf, 6, 6) + # half-half average over ground state and first excited state + mc.state_average_([0.5, 0.5]) + mc.run() + + # Optimize the first excited state + mc.state_specific_(state=1) + mc.run() + +More information of their usage can be found in PySCF examples +`10-state_average.py `_ +and +`11-excited_states.py `_. + + +DMRG-NEVPT2 +----------- + +DMRG-NEVPT2 calculation is straightforward if the DMRG-CASCI or DMRG-CASSCF are +finished:: + + from pyscf import gto, scf, dmrgscf, mrpt + mol = gto.M(atom="C 0 0 0; C 0 0 1", basis="ccpvdz") + mf = scf.RHF(mol).run() + + mc = dmrgscf.dmrgci.DMRGSCF(mf, 6, 6).run() + mrpt.nevpt2.sc_nevpt(mc) + + mc = mcscf.CASCI(mf, 6, 6) + mc.fcisolver = dmrgscf.dmrgci.DMRGCI(mol) + mc.run() + mrpt.nevpt2.sc_nevpt(mc) + +However, the default DMRG-NEVPT2 calculation is extremely demanding on both CPU +and memory resources. In Block code, there is an effective approximation +implemented which can significantly reduce the computation cost:: + + from pyscf import gto, scf, dmrgscf, mrpt + mol = gto.M(atom="C 0 0 0; C 0 0 1", basis="ccpvdz") + mf = scf.RHF(mol).run() + + mc = dmrgscf.dmrgci.DMRGSCF(mf, 6, 6).run() + + dmrgscf.dmrgci.DMRG_MPS_NEVPT(mc) + mrpt.nevpt2.sc_nevpt(mc, useMPS=True) + +The efficient NEVPT2 implementation is initialized with ``DMRG_MPS_NEVPT`` +function. The most demanding intermediates are computed in this function and +stored on disk. 
Next we need pass ``useMPS=True`` parameter to the NEVPT2 +method to tell NEVPT2 method to read the precomputed quantities. + +.. note:: The efficient NEVPT2 algorithm is also very demanding, especially on + the memory usage. Please refer to the :ref:`benchmark` for approximate cost. + +If the excitation energy is of interest, we can use DMRG-NEVPT2 to compute the +energy of excited state. Note only the state-specific NEVPT2 calculation is +available in the current Block version:: + + mc = mcscf.CASCI(mf, 6, 6) + mc.fcisolver = dmrgscf.dmrgci.DMRGCI(mol) + mc.fcisolver.nroots = 2 + mc.kernel() + dmrgscf.dmrgci.DMRG_MPS_NEVPT(mc, maxM=100, root=0) + mps_nevpt_e1 = mrpt.nevpt2.sc_nevpt(mc, ci=mc.ci[0], useMPS=True) + dmrgscf.dmrgci.DMRG_MPS_NEVPT(mc, maxM=100, root=1) + mps_nevpt_e2 = mrpt.nevpt2.sc_nevpt(mc, ci=mc.ci[1], useMPS=True) + +In the above example, two NEVPT2 calculations are carried out separately for +two states which are indicated by the ``mc.ci`` attributes. Each NEVPT2 +calculation has its own initializer ``DMRG_MPS_NEVPT`` which is specified by +the ``root`` argument. + +For DMRG-CASSCF and DMRG-NEVPT2 calculations, there are more examples available +in `PySCF source code `_. + + +Using Molpro +============ + +The examples of Block installation and DMRG-SCF calculation can be found in +`Molpro online manual `_. + + +.. ORCA +.. ==== +.. DMRG calculation within ORCA can be found in +.. https://sites.google.com/site/orcainputlibrary/cas-calculations/dmrg . + + diff --git a/docs/source/examples.rst b/docs/source/examples.rst index 049dd9f0..03858c39 100644 --- a/docs/source/examples.rst +++ b/docs/source/examples.rst @@ -1,8 +1,10 @@ -Typical Calculations -******************** +Typical Calculations with `BLOCK` +********************************* In the following the DMRG calculation for C\ :sub:`2` molecule is used to demonstrate various computational features as of the current 1.0.0 release. 
-Integrals and orbitals must be supplied externally in Molpro's ``FCIDUMP`` format, as ``BLOCK`` does not generate its own integrals. +Integrals and orbitals must be supplied externally in +`Molpro's FCIDUMP format `_, +as ``BLOCK`` does not generate its own integrals. The associated integral files for C\ :sub:`2` can be found here: `FCIDUMP `_ for its D\ :sub:`2h` point-group symmetry. diff --git a/docs/source/index.rst b/docs/source/index.rst index f89735b0..50546583 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -35,7 +35,10 @@ Contents overview.rst build.rst examples.rst + examples-with-pyscf.rst keywords.rst + benchmark.rst + CHANGELOG.rst .. Indices and tables .. ================== diff --git a/docs/source/overview.rst b/docs/source/overview.rst index 187d9309..6149aff4 100644 --- a/docs/source/overview.rst +++ b/docs/source/overview.rst @@ -10,9 +10,9 @@ Features * one-, two-, three- and four-particle density matrices, * one- and two-particle transition density matrices between two states, * perturbation methods including NEVPT2 and MPSPT, -* DMRG-SCF and/or DMRG-NEVPT2 interfaces to the Molpro, ORCA, Q-Chem and Molcas program packages. +* DMRG-SCF and/or DMRG-NEVPT2 interfaces to the PySCF, Molpro, ORCA, Q-Chem and Molcas program packages. 
-Calling `Block` as an external function +Calling `BLOCK` as an external function ======================================= The makefile distributed with `Block` code can be used to generate a library file called From f63dde73a718fa91843d78039e74a42506cd9487 Mon Sep 17 00:00:00 2001 From: Qiming Sun Date: Sat, 13 Feb 2016 23:55:27 -0500 Subject: [PATCH 2/2] Update doc --- docs/source/build.rst | 3 +- docs/source/conf.py | 4 +- docs/source/examples-with-pyscf.rst | 69 ++++++++++++----------------- docs/source/keywords.rst | 2 + 4 files changed, 34 insertions(+), 44 deletions(-) diff --git a/docs/source/build.rst b/docs/source/build.rst index 0e28e3e0..31b93696 100644 --- a/docs/source/build.rst +++ b/docs/source/build.rst @@ -93,8 +93,7 @@ Optionally, if `MPI4Py `_ was installed, the efficient DMRG-NEVPT2 implementation can be used, eg:: >>> from pyscf import mrpt - >>> dmrgscf.dmrgci.DMRG_MPS_NEVPT(mc) - >>> mrpt.nevpt2.sc_nevpt(mc, useMPS=True) + >>> mrpt.nevpt2.sc_nevpt(dmrgscf.compress_perturb(mc)) How to run `BLOCK` diff --git a/docs/source/conf.py b/docs/source/conf.py index 606d8c80..a23600e7 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -61,9 +61,9 @@ # built documents. # # The short X.Y version. -version = '1.0.0' +version = '1.1' # The full version, including alpha/beta/rc tags. -release = '1.0.0' +release = '1.1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/docs/source/examples-with-pyscf.rst b/docs/source/examples-with-pyscf.rst index 05c8be80..3fdf477d 100644 --- a/docs/source/examples-with-pyscf.rst +++ b/docs/source/examples-with-pyscf.rst @@ -1,5 +1,5 @@ -BLOCK for Electronic Structure Calculations -******************************************* +DMRG for Electronic Structure Calculations +****************************************** Using PySCF =========== @@ -27,22 +27,19 @@ Executing this script in command line:: $ python example1.py will start BLOCK program with 4 processors, assuming that you have the -configuration ``dmrgscf.settings.MPIPREFIX = "mpirun -n 4"`` - -.. note:: Don't put the prefix ``mpirun -n`` in the command line to start python. - -The number of processors can be dynamically adjusted using ``sys.argv`` -parameter, eg:: +configuration ``dmrgscf.settings.MPIPREFIX = "mpirun -n 4"``. +The number of parallel processors can be dynamically adjusted using +``sys.argv``, eg:: $ cat example2.py import sys from pyscf import gto, scf, dmrgscf - dmrgscf.dmrgci.settings.MPIPREFIX = "mpirun -n %s" % sys.argv[0] + dmrgscf.dmrgci.settings.MPIPREFIX = "mpirun -n %s" % sys.argv[1] mf = gto.M(atom="C 0 0 0; C 0 0 1", basis="ccpvdz").apply(scf.RHF).run() mc = dmrgscf.dmrgci.DMRGSCF(mf, 6, 6) mc.run() - $ python example1.py 4 + $ python example2.py 4 In the above examples, ``gto``, ``scf`` are the standard modules provided by PySCF package. For the use of PySCF package, we refer the reader to the @@ -84,17 +81,17 @@ the settings of ``dmrgsolver`` object, eg to set the sweep schedule:: dmrgsolver.twodot_to_onedot = 38 For more details of the default settings and control parameters, we refer to -the `source code `_ -and the corresponding Block code `keywords list `_. +the `PySCF source code `_ +and the corresponding Block code :ref:`keywords_list` list. To make the embedded DMRG solver work more efficiently in CASSCF iteration, one need carefully tune the DMRG runtime parameters. 
It means more input arguments and options to be specified for ``dmrgsolver`` object. To simplify the input, we provided a shortcut function ``DMRGSCF`` in the ``dmrgscf`` module, as shown by the first example ``example1.py``. In the ``DMRGSCF`` function, we created -a CASSCF object, and changed its ``fcisolver`` to ``dmrgsolver``, then hook a -function which is used to dynamically adjust sweep scheduler to CASSCF object. -Now, the DMRG-CASSCF method can be applied in one line of input:: +a CASSCF object, and assigned ``dmrgsolver`` to its ``fcisolver``, then hooked +a function which is used to dynamically adjust sweep scheduler to CASSCF object. +Now, the DMRG-CASSCF calculation can be executed in one line of input:: mc = dmrgscf.dmrgci.DMRGSCF(mf, norb, nelec).run() @@ -105,20 +102,20 @@ Apparently, we can modify its value to change the place to save the DMRG wave function. The default directory is read from the dmrgscf configuration parameter ``dmrgscf.settings.BLOCKSCRATCHDIR``. -.. note:: Be sure the mc.fcisolver.scratchDirectory is properly assigned. - Since all DMRGCI object by default uses the same BLOCKSCRATCHDIR settings, +.. note:: Be sure the ``mc.fcisolver.scratchDirectory`` is properly assigned. + Since all DMRGCI object by default uses the same ``BLOCKSCRATCHDIR`` settings, it's easy to cause name conflicts on the scratch directory, especially when two DMRG-CASSCF calculations are executed on the same node. .. note:: Usually, the DMRG wave function is very large. Be sure that the - disk which BLOCKSCRATCHDIR pointed to has enough space. + disk which ``BLOCKSCRATCHDIR`` pointed to has enough space. Due to the complexity of multi-configuration model, it's common that we need -interrupted the CASSCF calculation and restart a new one with modified +interrupt the CASSCF calculation and restart the calculation with modified parameters. 
To restart the CASSCF calculation, we need the information such as -orbital coefficients and active space CI wave function of last simulation. -Although the orbital coefficients can be save/load through PySCF `chkfile` -module (see `PySCF documentation `_), the CI wave +orbital coefficients and active space CI wave function from last simulation. +Although the orbital coefficients can be save/load through +`PySCF chkfile module `_, the CI wave function are not saved by PySCF. Unlike the regular Full CI based CASSCF calculation in which the Full CI wave function can be fast rebuilt by a fresh running, the restart feature of DMRG-CASSCF calculation relies on the wave @@ -130,9 +127,8 @@ function indicated by the ``mc.fcisolver.scratchDirectory`` attribute and the mc.fcisolver.restart = True mc.run() -.. note:: Assigne the direct Block wave function directory to - mc.fcisolver.scratchDirectory. An incorrect DMRG wave function may cause - DMRG-CASSCF crash. +.. note:: A mismatched DMRG wave function (from wrong + ``mc.fcisolver.scratchDirectory``) may cause DMRG-CASSCF crash. Other common features like state-average DMRG-CASSCF or state-specific for excited state can be easily called with the ``DMRGSCF`` wrapper function:: @@ -176,21 +172,18 @@ finished:: However, the default DMRG-NEVPT2 calculation is extremely demanding on both CPU and memory resources. In Block code, there is an effective approximation -implemented which can significantly reduce the computation cost:: +implemented based on compressed MPS perturber which can significantly +reduce the computation cost:: from pyscf import gto, scf, dmrgscf, mrpt mol = gto.M(atom="C 0 0 0; C 0 0 1", basis="ccpvdz") mf = scf.RHF(mol).run() - mc = dmrgscf.dmrgci.DMRGSCF(mf, 6, 6).run() - dmrgscf.dmrgci.DMRG_MPS_NEVPT(mc) - mrpt.nevpt2.sc_nevpt(mc, useMPS=True) + mrpt.nevpt2.sc_nevpt(dmrgscf.compress_perturb(mc)) -The efficient NEVPT2 implementation is initialized with ``DMRG_MPS_NEVPT`` -function. 
The most demanding intermediates are computed in this function and -stored on disk. Next we need pass ``useMPS=True`` parameter to the NEVPT2 -method to tell NEVPT2 method to read the precomputed quantities. +The efficient NEVPT2 needs be initialized with ``compress_perturb`` function, +in which the most demanding intermediates are precomputed and stored on disk. .. note:: The efficient NEVPT2 algorithm is also very demanding, especially on the memory usage. Please refer to the :ref:`benchmark` for approximate cost. @@ -203,15 +196,11 @@ available in the current Block version:: mc.fcisolver = dmrgscf.dmrgci.DMRGCI(mol) mc.fcisolver.nroots = 2 mc.kernel() - dmrgscf.dmrgci.DMRG_MPS_NEVPT(mc, maxM=100, root=0) - mps_nevpt_e1 = mrpt.nevpt2.sc_nevpt(mc, ci=mc.ci[0], useMPS=True) - dmrgscf.dmrgci.DMRG_MPS_NEVPT(mc, maxM=100, root=1) - mps_nevpt_e2 = mrpt.nevpt2.sc_nevpt(mc, ci=mc.ci[1], useMPS=True) + mps_nevpt_e1 = mrpt.nevpt2.sc_nevpt(dmrgscf.compress_perturb(mc, maxM=100, root=0)) + mps_nevpt_e2 = mrpt.nevpt2.sc_nevpt(dmrgscf.compress_perturb(mc, maxM=100, root=1)) In the above example, two NEVPT2 calculations are carried out separately for -two states which are indicated by the ``mc.ci`` attributes. Each NEVPT2 -calculation has its own initializer ``DMRG_MPS_NEVPT`` which is specified by -the ``root`` argument. +two states which are indicated by the argument ``root=*``. For DMRG-CASSCF and DMRG-NEVPT2 calculations, there are more examples available in `PySCF source code `_. diff --git a/docs/source/keywords.rst b/docs/source/keywords.rst index 7c9fdba0..033aa6ee 100644 --- a/docs/source/keywords.rst +++ b/docs/source/keywords.rst @@ -1,3 +1,5 @@ +.. _keywords_list: + Keywords ********