diff --git a/.ci/steps/setup_step.yml b/.ci/steps/setup_step.yml
new file mode 100644
index 00000000..e30de986
--- /dev/null
+++ b/.ci/steps/setup_step.yml
@@ -0,0 +1,27 @@
+
+parameters:
+  storagename: # name of the storage account holding the model files
+  storagekey: # access key for that storage account
+  conda: seismic-interpretation
+
+steps:
+
+- bash: |
+    echo "##vso[task.prependpath]$CONDA/bin"
+
+- bash: |
+    echo "Running setup..."
+
+    # make sure we have the latest and greatest (the Python version is pinned in the environment file)
+    conda env create -f environment/anaconda/local/environment.yml --force
+    conda init bash
+    source activate ${{parameters.conda}}
+    pip install -e interpretation
+    pip install -e cv_lib
+    # add this if pytorch stops detecting GPU
+    # conda install pytorch torchvision cudatoolkit=9.2 -c pytorch
+
+    # copy your model files like so - using dummy file to illustrate
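+    # (assumes the older Linux azcopy CLI with --source/--source-key/--destination
+    # flags; the source is the "models" container of the storage account passed in
+    # via the parameters - point --destination wherever your code expects the model)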
+    azcopy --quiet --source https://${{parameters.storagename}}.blob.core.windows.net/models/model --source-key ${{parameters.storagekey}} --destination ./models/your_model_name
+  displayName: Setup
+  failOnStderr: True
diff --git a/.ci/steps/unit_test_steps.yml b/.ci/steps/unit_test_steps.yml
new file mode 100644
index 00000000..ea06da5c
--- /dev/null
+++ b/.ci/steps/unit_test_steps.yml
@@ -0,0 +1,18 @@
+parameters:
+  conda: seismic-interpretation
+
+steps:
+  - bash: |
+      echo "Starting unit tests"
+      source activate ${{parameters.conda}}
+      pytest --durations=0 --junitxml 'reports/test-unit.xml' cv_lib/tests/
+      echo "Unit test job passed"
+    displayName: Unit Tests Job
+    failOnStderr: True
+
+  - task: PublishTestResults@2
+    displayName: 'Publish Test Results **/test-*.xml'
+    inputs:
+      testResultsFiles: '**/test-*.xml'
+      failTaskOnFailedTests: true
+    condition: succeededOrFailed()
diff --git a/.ci/unit_test_build.yml b/.ci/unit_test_build.yml
new file mode 100644
index 00000000..ea7701de
--- /dev/null
+++ b/.ci/unit_test_build.yml
@@ -0,0 +1,28 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+
+# Pull requests against these branches will trigger this build
+pr:
+- master
+- staging
+
+# Any commit to these branches will trigger the build.
+trigger:
+- master
+- staging
+
+jobs:
+# partially disable setup for now - done manually on build VM
+- job: DeepSeismic
+
+  displayName: Deep Seismic Main Build
+  pool:
+    name: $(AgentName)
+
+  steps:
+  - template: steps/setup_step.yml
+    parameters:
+      storagename: $(storageaccountname)
+      storagekey: $(storagekey)
+
+  - template: steps/unit_test_steps.yml
diff --git a/.flake8 b/.flake8
new file mode 100644
index 00000000..f09cc27c
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,17 @@
+[flake8]
+max-line-length = 120
+max-complexity = 18
+select = B,C,E,F,W,T4,B9
+ignore =
+    # slice notation whitespace, invalid
+    E203
+    # too many leading '#' for block comment
+    E266
+    # module level import not at top of file
+    E402
+    # line break before binary operator
+    W503
+    # blank line contains whitespace
+    W293
+    # line too long
+    E501
diff --git a/.gitignore b/.gitignore
index 894a44cc..a675ed36 100644
--- a/.gitignore
+++ b/.gitignore
@@ -89,6 +89,24 @@ venv/
 ENV/
 env.bak/
 venv.bak/
+wheels/
+
+
+.dev_env
+.azureml
+
+# Logs
+*.tfevents.*
+**/runs
+**/log
+**/output
+
+#
+interpretation/environment/anaconda/local/src/*
+interpretation/environment/anaconda/local/src/cv-lib
+.code-workspace.code-workspace
+**/.vscode
+**/.idea
 
 # Spyder project settings
 .spyderproject
@@ -97,8 +115,4 @@ venv.bak/
 # Rope project settings
 .ropeproject
 
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
+*.pth
\ No newline at end of file
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..d852a98b
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,17 @@
+repos:
+- repo: https://github.com/psf/black
+  rev: stable
+  hooks:
+  - id: black
+- repo: https://github.com/pre-commit/pre-commit-hooks
+  rev: v1.2.3
+  hooks:
+  - id: flake8
+- repo: local
+  hooks:
+  - id: jupytext
+    name: jupytext
+    entry: jupytext --from ipynb --pipe black --check flake8
+    pass_filenames: true
+    files: .ipynb
+    language: python
diff --git a/.vscode/settings.json b/.vscode/settings.json
deleted file mode 100644
index 85fbc9f8..00000000
--- a/.vscode/settings.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "python.formatting.provider": "black",
-    "python.linting.enabled": true,
-    "python.linting.flake8Enabled": true,
-    "python.linting.pylintEnabled": false,
-}
\ No newline at end of file
diff --git a/AUTHORS.md b/AUTHORS.md
new file mode 100644
index 00000000..c0011f3e
--- /dev/null
+++ b/AUTHORS.md
@@ -0,0 +1,32 @@
+Contributors
+============
+
+All names are sorted alphabetically by last name.
+Contributors, please add your name to the list when you submit a patch to the project.
+
+
+Contributors (sorted alphabetically)
+-------------------------------------
+To contributors: please add your name to the list when you submit a patch to the project.
+
+* Ashish Bhatia
+* Daniel Ciborowski
+* George Iordanescu
+* Ilia Karmanov
+* Max Kaznady
+* Vanja Paunic
+* Mathew Salvaris
+
+
+## How to be a contributor to the repository
+This project welcomes contributions and suggestions. Most contributions require you to agree to a
+Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
+the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
+
+When you submit a pull request, a CLA bot will automatically determine whether you need to provide
+a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
+provided by the bot. You will only need to do this once across all repos using our CLA.
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..4c422e46
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,88 @@
+# Contribution Guidelines
+
+Contributions are welcome! Here are a few things to know:
+
+* [Steps to Contributing](#steps-to-contributing)
+* [Coding Guidelines](#coding-guidelines)
+* [Microsoft Contributor License Agreement](#microsoft-contributor-license-agreement)
+* [Code of Conduct](#code-of-conduct)
+
+## Steps to Contributing
+
+**TL;DR for contributing: We use the staging branch to land all new features and fixes. To make a contribution, please create a branch from staging, make your changes, and create a PR back to staging.**
+
+Here are the basic steps to get started with your first contribution. Please reach out with any questions.
+1. Use [open issues](https://github.com/Microsoft/DeepSeismic/issues) to discuss the proposed changes. Create an issue describing the changes if necessary to collect feedback. Also, please use the provided labels to tag issues so everyone can easily sort issues of interest.
+2. [Fork the repo](https://help.github.com/articles/fork-a-repo/) so you can make and test local changes.
+3. Create a new branch **from the staging branch** for the issue (please do not create a branch from master). We suggest prefixing the branch name with your username, followed by a descriptive title (e.g. `username/update_contributing_docs`); see the sketch after this list.
+4. Create a test that replicates the issue.
+5. Make code changes.
+6. Ensure unit tests pass and code style / formatting is consistent (TODO: add docstring links).
+7. Create a pull request against the **staging** branch.
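+
+For steps 2, 3 and 6, the workflow looks roughly like the sketch below (the fork URL and branch name are placeholders - substitute your own):
+
+```
+# clone your fork and create a working branch off staging
+git clone https://github.com/<your-username>/DeepSeismic.git
+cd DeepSeismic
+git checkout staging
+git checkout -b <your-username>/update_contributing_docs
+
+# after making changes, run the same unit tests CI runs
+pytest --durations=0 cv_lib/tests/
+```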
+
+Once the features included in a [milestone](https://github.com/Microsoft/DeepSeismic/milestones) are completed, we will merge staging into master. TODO: make a wiki with coding guidelines.
+
+## Coding Guidelines
+
+We strive to maintain high quality code to make the utilities in the repository easy to understand, use, and extend. We also work hard to maintain a friendly and constructive environment. We've found that having clear expectations on the development process and consistent style helps to ensure everyone can contribute and collaborate effectively.
+
+### Code formatting and style checking
+We use git hooks to automate formatting and style checking. In particular, we use `black` as the code formatter, `flake8` for style checking, and the `pre-commit` Python framework, which ensures that both the formatter and the checker run on the code at commit time. If both pass, the commit is made; otherwise, the commit is rejected until the stylistic or formatting issues are fixed.
+
+Please follow these instructions to set up `pre-commit` in your environment.
+
+```
+pip install pre-commit
+pre-commit install
+```
+
+The above installs the `pre-commit` package and the git hooks specified in `.pre-commit-config.yaml` into your `.git/` directory. You can also invoke the hooks manually, as in the sketch below.
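+
+For example, to run all configured hooks once over the whole repository (a useful sanity check before your first commit; output will vary):
+
+```
+pre-commit run --all-files
+```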
+
+## Microsoft Contributor License Agreement
+
+Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.
+
+TODO: add CLA-bot
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+Apart from the official Code of Conduct developed by Microsoft, in the Computer Vision team we adopt the following behaviors to ensure a great working environment:
+
+#### Do not point fingers
+Let's be constructive.
+
+<details><summary><em>Click here to see some examples</em></summary>
+
+"This method is missing docstrings" instead of "YOU forgot to put docstrings".
+
+</details>
+
+#### Provide code feedback based on evidence
+
+When making code reviews, try to support your ideas with evidence (papers, library documentation, Stack Overflow, etc.) rather than personal preference.
+
+<details><summary><em>Click here to see some examples</em></summary>
+
+"When reviewing this code, I saw that in the Python implementation the metrics are based on classes; however, [scikit-learn](https://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics) and [tensorflow](https://www.tensorflow.org/api_docs/python/tf/metrics) use functions. We should follow the industry standard."
+
+</details>
+
+
+#### Ask questions - do not give answers
+Try to be empathic.
+
+<details><summary><em>Click here to see some examples</em></summary>
+
+* Would it make more sense if ...?
+* Have you considered this ... ?
+
+</details>
+ + diff --git a/DeepSeismicLogo.jpg b/DeepSeismicLogo.jpg new file mode 100644 index 00000000..6c68879d Binary files /dev/null and b/DeepSeismicLogo.jpg differ diff --git a/LICENSE b/LICENSE index 3d8b93bc..236805ac 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,22 @@ - MIT License - - Copyright (c) Microsoft Corporation. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE + \ No newline at end of file diff --git a/NOTICE.txt b/NOTICE.txt new file mode 100755 index 00000000..6dc34351 --- /dev/null +++ b/NOTICE.txt @@ -0,0 +1,2058 @@ +NOTICES AND INFORMATION +Do Not Translate or Localize + +This software incorporates material from third parties. +Microsoft makes certain open source code available at https://3rdpartysource.microsoft.com, +or you may send a check or money order for US $5.00, including the product name, +the open source component name, and version number, to: + +Source Code Compliance Team +Microsoft Corporation +One Microsoft Way +Redmond, WA 98052 +USA + +Notwithstanding any other terms, you may reverse engineer this software to the extent +required to debug changes to any libraries licensed under the GNU Lesser General Public License. 
+ + +------------------------------------------------------------------- + +h5py 2.9.0 - BSD-2-Clause +PyTables Copyright Statement +Copyright (c) 2009 Darren Dale +Copyright 2006-2007 by The HDF Group +Copyright (c) 2006-2008 Alexander Chemeris +Copyright (c) 2002, 2003, 2004 Francesc Altet +Copyright 2001-2013 Python Software Foundation +Copyright (c) 2005, 2006, 2007 Carabos Coop. V. +copyright u'2014, Andrew Collette and contributors +Copyright 2008-2013 Andrew Collette and contributors +Copyright 2008-2018 Andrew Collette and contributors +Copyright (c) 2008 Andrew Collette http://h5py.alfven.org +Copyright (c) 2009 Andrew Collette http://h5py.alfven.org +Copyright (c) 2008-2009 Andrew Collette http://h5py.alfven.org +Copyright (c) 2000-2007 Marc Alexander Lehmann +Copyright (c) 2000-2008 Marc Alexander Lehmann +Copyright (c) 2008 Andrew Collette and contributors http://h5py.alfven.org +Copyright 1998-2006 by the Board of Trustees of the University of Illinois. +Copyright (c) 2008-2013 Andrew Collette and contributors http://www.h5py.org + +Copyright (c) 2001, 2002 Enthought, Inc. +All rights reserved. + +Copyright (c) 2003-2019 SciPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + a. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + b. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + c. Neither the name of Enthought nor the names of the SciPy Developers + may be used to endorse or promote products derived from this software + without specific prior written permission. + + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, +OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. + + + +SciPy bundles a number of libraries that are compatibly licensed. We list +these here. 
+ +Name: Numpydoc +Files: doc/sphinxext/numpydoc/* +License: 2-clause BSD + For details, see doc/sphinxext/LICENSE.txt + +Name: scipy-sphinx-theme +Files: doc/scipy-sphinx-theme/* +License: 3-clause BSD, PSF and Apache 2.0 + For details, see doc/sphinxext/LICENSE.txt + +Name: Six +Files: scipy/_lib/six.py +License: MIT + For details, see the header inside scipy/_lib/six.py + +Name: Decorator +Files: scipy/_lib/decorator.py +License: 2-clause BSD + For details, see the header inside scipy/_lib/decorator.py + +Name: ID +Files: scipy/linalg/src/id_dist/* +License: 3-clause BSD + For details, see scipy/linalg/src/id_dist/doc/doc.tex + +Name: L-BFGS-B +Files: scipy/optimize/lbfgsb/* +License: BSD license + For details, see scipy/optimize/lbfgsb/README + +Name: SuperLU +Files: scipy/sparse/linalg/dsolve/SuperLU/* +License: 3-clause BSD + For details, see scipy/sparse/linalg/dsolve/SuperLU/License.txt + +Name: ARPACK +Files: scipy/sparse/linalg/eigen/arpack/ARPACK/* +License: 3-clause BSD + For details, see scipy/sparse/linalg/eigen/arpack/ARPACK/COPYING + +Name: Qhull +Files: scipy/spatial/qhull/* +License: Qhull license (BSD-like) + For details, see scipy/spatial/qhull/COPYING.txt + +Name: Cephes +Files: scipy/special/cephes/* +License: 3-clause BSD + Distributed under 3-clause BSD license with permission from the author, + see https://lists.debian.org/debian-legal/2004/12/msg00295.html + + Cephes Math Library Release 2.8: June, 2000 + Copyright 1984, 1995, 2000 by Stephen L. Moshier + + This software is derived from the Cephes Math Library and is + incorporated herein by permission of the author. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +Name: Faddeeva +Files: scipy/special/Faddeeva.* +License: MIT + Copyright (c) 2012 Massachusetts Institute of Technology + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Name: qd +Files: scipy/special/cephes/dd_*.[ch] +License: modified BSD license ("BSD-LBNL-License.doc") + This work was supported by the Director, Office of Science, Division + of Mathematical, Information, and Computational Sciences of the + U.S. Department of Energy under contract numbers DE-AC03-76SF00098 and + DE-AC02-05CH11231. + + Copyright (c) 2003-2009, The Regents of the University of California, + through Lawrence Berkeley National Laboratory (subject to receipt of + any required approvals from U.S. Dept. of Energy) All rights reserved. + + 1. Redistribution and use in source and binary forms, with or + without modification, are permitted provided that the following + conditions are met: + + (1) Redistributions of source code must retain the copyright + notice, this list of conditions and the following disclaimer. + + (2) Redistributions in binary form must reproduce the copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + (3) Neither the name of the University of California, Lawrence + Berkeley National Laboratory, U.S. Dept. of Energy nor the names + of its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + 2. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 3. 
You are under no obligation whatsoever to provide any bug fixes, + patches, or upgrades to the features, functionality or performance of + the source code ("Enhancements") to anyone; however, if you choose to + make your Enhancements available either publicly, or directly to + Lawrence Berkeley National Laboratory, without imposing a separate + written license agreement for such Enhancements, then you hereby grant + the following license: a non-exclusive, royalty-free perpetual license + to install, use, modify, prepare derivative works, incorporate into + other computer software, distribute, and sublicense such enhancements + or derivative works thereof, in binary and source code form. + + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +Copyright (C) 2008 Stefan van der Walt , Pauli Virtanen + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + + +Copyright (C) 2003-2005 Peter J. Verveer + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +3. The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + + +Copyright (c) 2002-2005, Jean-Sebastien Roy (js@jeannot.org) + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + + Qhull, Copyright (c) 1993-2015 + + C.B. Barber + Arlington, MA + + and + + The National Science and Technology Research Center for + Computation and Visualization of Geometric Structures + (The Geometry Center) + University of Minnesota + + email: qhull@qhull.org + +This software includes Qhull from C.B. Barber and The Geometry Center. +Qhull is copyrighted as noted above. Qhull is free software and may +be obtained via http from www.qhull.org. It may be freely copied, modified, +and redistributed under the following conditions: + +1. All copyright notices must remain intact in all files. + +2. A copy of this text file must be distributed along with any copies + of Qhull that you redistribute; this includes copies that you have + modified, or copies of programs or other software products that + include Qhull. + +3. If you modify Qhull, you must include a notice giving the + name of the person performing the modification, the date of + modification, and the reason for such modification. + +4. When distributing modified versions of Qhull, or other software + products that include Qhull, you must provide notice that the original + source code may be obtained as noted above. + +5. There is no warranty or other guarantee of fitness for Qhull, it is + provided solely "as is". Bug reports or fixes may be sent to + qhull_bug@qhull.org; the authors may or may not act on them as + they desire. + + +------------------------------------------------------------------- + +------------------------------------------------------------------- + +scipy 1.3.0 - BSD-2-Clause +(c) Col +(c) KvK +(c) 2011 +(c) 2012 +(c) 2014 +(c) KKOK KkK +(c) B Whether +(c) , (R,1) col +copyright u'2017 +(c) Compute Hessian +Copyright 2014 PSF. +(c) .GT. ZERO .AND. ABS +Copyright Gautam Sewani +(c) KKKKKwKnKK K KQKKKKe +copyrighted by Alan Genz +Copyright 2006 Johan Rade +Copyright Paul A. 
Bristow +Csp self.spmatrix (c) Dsp +(c) KKKKY KKKKKKKKKKKKKKKKe +(c) KZK8K K9K8K KCKDKCKDK7K +(c) KaKKKQK K KzKkKKKiKKqKK +Copyright 2006 John Maddock +Copyright 2012 Twitter, Inc +Copyright 2000 by Alan Genz. +Copyright 2008 Gautam Sewani +Copyright 2013 Andrea Gavana +Copyright Gautam Sewani 2008 +Copyright John Maddock 2005. +Copyright John Maddock 2006. +Copyright John Maddock 2007. +Copyright John Maddock 2008. +Copyright John Maddock 2009. +Copyright John Maddock 2010. +Copyright John Maddock 2011. +Copyright John Maddock 2012. +Copyright John Maddock 2013. +Copyright Paul Bristow 2007. +Copyright Yosef Meller, 2009 +Copyright (c) 2006 Johan Rade +Copyright (c) 2014 Eric Moore +Copyright (c) Piers Lawrence. +Copyright 2002 Pearu Peterson +Copyright 2014, Eric W. Moore +Copyright Xiaogang Zhang 2006 +copyright Cephes Math Library +(c) KKPSKKtK KWKzKeKzKvK KyKjK +Copyright (c) 2008 Damian Eads +Copyright (c) 2012 Google Inc. +Copyright 1999 Travis Oliphant +Copyright 2002 Gary Strangman. +Copyright 2005 Travis Oliphant +Copyright 2010 Paul A. Bristow +Copyright 2011 Paul A. Bristow +Copyright 2012 Paul A. Bristow +Copyright John Maddock 2006-7. +Copyright John Maddock 2007-8. +Qhull, Copyright (c) 1993-2015 +copyrighted by Enthought, Inc. +(c) KSSKaKoKUKoKeKIKKKKK KKKKKK +Copyright (c) 2006 John Maddock +Copyright (c) 2007 John Maddock +Copyright (c) 2011 John Maddock +Copyright (c) 2016 Adrian Veres +Copyright (c) Tyler Reddy, 2016 +Copyright Paul A. Bristow 2006. +Copyright Paul A. Bristow 2007. +Copyright Paul A. Bristow 2010. +Copyright Paul A. Bristow 2012. +Copyright Paul A. Bristow 2013. +(c) Copyright Hubert Holin 2003. +(c) Copyright John Maddock 2005. +(c) Copyright John Maddock 2006. +(c) Copyright John Maddock 2007. +(c) Copyright John Maddock 2008. +(c) Copyright John Maddock 2010. +Copyright (c) 2007, Damian Eads. +Copyright (c) 2013 Kenneth L. Ho +Copyright 1991 Dieter Kraft, FHM +Copyright Anne M. Archibald 2008 +Copyright Benjamin Sobotta 2012. +(c) Copyright Bruno Lalande 2008. +Copyright (c) 2006 Xiaogang Zhang +Copyright (c) 2009 Pauli Virtanen +Copyright (c) 2009, Motorola, Inc +Copyright (c) 2013 Pauli Virtanen +Copyright 2011 Paul A. Bristow To +Copyright Paul A. Bristow 2006-7. +(c) Copyright John Maddock 2006-7. +(c) Copyright Paul A. Bristow 2011 +Copyright (c) 2002 Travis Oliphant +Copyright (c) 2011 Paul A. Bristow +Copyright (c) 2012 Paul A. Bristow +Copyright John Maddock 2006, 2007. +Copyright John Maddock 2006, 2011. +Copyright John Maddock 2006, 2012. +Copyright John Maddock 2008, 2012. +Copyright Paul Bristow 2006, 2007. +Copyright Paul Bristow 2007, 2011. +Copyright (c) 1988 by Theo Jurriens +Copyright (c) Benjamin Sobotta 2012 +Copyright (c) Pauli Virtanen, 2010. +Copyright 2002 H Lohninger, TU Wein +Copyright 2015 Jon Lund Steffensen. +Copyright Thijs van den Berg, 2008. +Copyright (c) 1993-2015 C.B. Barber. +Copyright (c) 2007 Cybozu Labs, Inc. +Copyright Paul A. Bristow 2009, 2011 +(c) Copyright Hubert Holin 2003-2005. +(c) KyKOKQKOKEK9K8K KFKGKGKJKHKKAKKAK +Copyright (c) 2007 - Sebastien Fabbro +Copyright (c) 2011 Paul A. Bristow To +Copyright (c) 2014 Mathjax Consortium +Copyright (c) 2015-2017 Martin Hensel +Copyright (c) 2016 2017 Felix Lenders +Copyright (c) Damian Eads, 2007-2008. +Copyright Christopher Kormanyos 2013. +Copyright Paul A. Bristow 2007, 2009. +Copyright Paul A. Bristow 2007, 2010. +Copyright Paul A. Bristow 2007, 2012. +Copyright Paul A. Bristow 2008, 2009. +Copyright (c) 2007, 2008, Damian Eads. 
+Copyright (c) 2012, Jaydeep P. Bardhan +Copyright (c) 2012, Matthew G. Knepley +Copyright (c) 2014, Janani Padmanabhan +Copyright 2004-2005 by Enthought, Inc. +Copyright 2007-2011 by the Sphinx team +Copyright 2007-2018 by the Sphinx team +copyright 2008- s, The SciPy community +(c) Copyright Daryle Walker 2001, 2006. +Copyright (c) 2010 Thomas P. Robitaille +Copyright (c) 1989-2004 Johannes Braams. +Copyright (c) 1994 by Xerox Corporation. +Copyright (c) 1996-2008 Rice University. +Copyright (c) 2001, 2002 Enthought, Inc. +Copyright (c) 2003-2005 Peter J. Verveer +Copyright 2002-2016 The SciPy Developers +Copyright (c) 2003-2019 SciPy Developers. +Copyright (c) 2010-2012 Benjamin Peterson +Copyright (c) 1990-2004 by Johannes Braams +Copyright (c) 2005-2015, Michele Simionato +Copyright (c) 2006-2008 Alexander Chemeris +Copyright 1984, 1995 by Stephen L. Moshier +Copyright 1984, 1996 by Stephen L. Moshier +Copyright 1985 by Stephen L. Moshier Direct +Copyright (c) 1993-2015 The Geometry Center. +Copyright (c) 2001-2011 - Scilab Enterprises +Copyright (c) 2010 - Jordi Gutierrez Hermoso +Copyright (c) 2009-2017 The MathJax Consortium +Copyright (c) 2010-2017 The MathJax Consortium +Copyright (c) 2011-2015 The MathJax Consortium +Copyright (c) 2011-2017 The MathJax Consortium +Copyright (c) 2013-2017 The MathJax Consortium +Copyright (c) 2014-2017 The MathJax Consortium +Copyright (c) 2015-2017 The MathJax Consortium +Copyright (c) 2016-2017 The MathJax Consortium +Copyright J.S. Roy (js@jeannot.org), 2002-2005 +Copyright (c) 2009, Pauli Virtanen +Copyright (c) 2015, Pauli Virtanen +Copyright 1984, 1987, 1995 by Stephen L. Moshier +Copyright 1984, 1987, 2000 by Stephen L. Moshier +Copyright 1984, 1995, 2000 by Stephen L. Moshier +Copyright 1985, 1987, 2000 by Stephen L. Moshier +Copyright 1984, 1987 by Stephen L. Moshier Direct +Copyright 1984, 1991 by Stephen L. Moshier Direct +Copyright 1985, 1987 by Stephen L. Moshier Direct +Copyright Paul A. Bristow 2007, 2009, 2010, 2012. +Copyright (c) 2010 David Fong and Michael Saunders +copyright u'2013, Surya Kasturi and Pauli Virtanen +Copyright (c) 1992-2015 The University of Tennessee +Copyright (c) 2006, Systems Optimization Laboratory +Copyright (c) 2007, John Travers +Copyright (c) 1998-2003 by the University of Florida. +Copyright 1984, 1987, 1988, 2000 by Stephen L. Moshier +Copyright 1984, 1987, 1989, 1995 by Stephen L. Moshier +Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier +Copyright 1984, 1987, 1992, 2000 by Stephen L. Moshier +Copyright 1984, 1987, 1988 by Stephen L. Moshier Direct +Copyright 1984, 1987, 1989 by Stephen L. Moshier Direct +Copyright 1984, 1987, 1993 by Stephen L. Moshier Direct +Copyright 1985, 1987, 1989 by Stephen L. Moshier Direct +Copyright (c) 2012 Massachusetts Institute of Technology +Copyright (c) 2006-2007, Robert Hetland +Copyright (c) 2006-2015 The University of Colorado Denver. +Copyright (c) 2002-2005, Jean-Sebastien Roy (js@jeannot.org) +Copyright (c) 2004-2005, Jean-Sebastien Roy (js@jeannot.org) +Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier +Copyright 1984, 1987, 1988, 1992 by Stephen L. Moshier Direct +Copyright 1984, 1987, 1989, 1992 by Stephen L. Moshier Direct +Copyright (c) 2000-2015 The University of California Berkeley. +Copyright (c) Tyler Reddy, Richard Gowers, and Max Linke, 2016 +Copyright (c) 2004 David M. Cooke +Copyright Daryle Walker, Hubert Holin, John Maddock 2006 - 2007 +copyrighted 2004 by David M. Cooke +Copyright (c) 2008 Brian M. 
Clapper , Gael Varoquaux +Copyright (c) 2011 Kevin Dunn, Surya K, Pauli Virtanen, the Sphinx team +Copyright 2014 by P.-G. Martinsson, V. Rokhlin, Y. Shkolnisky, and M. Tygert. +KQKJKCKGKEKBKMKrK (c) KUKoKoKiKeKeKiKiKiKiKiKiKiKiKiKiKiKiKiKiKoKeKaKiKiKiKiKdKIKAKThKOK +Copyright (c) 2008 Stefan van der Walt , Pauli Virtanen +Copyright (c) 2018 Sylvain Gubian , Yang Xiang +(c) KKdegKoKKKKY KY KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe +Copyright (c) Tyler Reddy, Ross Hemsley, Edd Edmondson, Nikolai Nowaczyk, Joe Pitt-Francis, 2015. +Copyright (c) 2003, The Regents of the University of California, through Lawrence Berkeley National Laboratory +Copyright (c) 2003-2009, The Regents of the University of California, through Lawrence Berkeley National Laboratory +Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Python Software Foundation + +Copyright (c) 2001, 2002 Enthought, Inc. +All rights reserved. + +Copyright (c) 2003-2019 SciPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + a. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + b. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + c. Neither the name of Enthought nor the names of the SciPy Developers + may be used to endorse or promote products derived from this software + without specific prior written permission. + + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, +OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. + + + +SciPy bundles a number of libraries that are compatibly licensed. We list +these here. 
+ +Name: Numpydoc +Files: doc/sphinxext/numpydoc/* +License: 2-clause BSD + For details, see doc/sphinxext/LICENSE.txt + +Name: scipy-sphinx-theme +Files: doc/scipy-sphinx-theme/* +License: 3-clause BSD, PSF and Apache 2.0 + For details, see doc/sphinxext/LICENSE.txt + +Name: Six +Files: scipy/_lib/six.py +License: MIT + For details, see the header inside scipy/_lib/six.py + +Name: Decorator +Files: scipy/_lib/decorator.py +License: 2-clause BSD + For details, see the header inside scipy/_lib/decorator.py + +Name: ID +Files: scipy/linalg/src/id_dist/* +License: 3-clause BSD + For details, see scipy/linalg/src/id_dist/doc/doc.tex + +Name: L-BFGS-B +Files: scipy/optimize/lbfgsb/* +License: BSD license + For details, see scipy/optimize/lbfgsb/README + +Name: SuperLU +Files: scipy/sparse/linalg/dsolve/SuperLU/* +License: 3-clause BSD + For details, see scipy/sparse/linalg/dsolve/SuperLU/License.txt + +Name: ARPACK +Files: scipy/sparse/linalg/eigen/arpack/ARPACK/* +License: 3-clause BSD + For details, see scipy/sparse/linalg/eigen/arpack/ARPACK/COPYING + +Name: Qhull +Files: scipy/spatial/qhull/* +License: Qhull license (BSD-like) + For details, see scipy/spatial/qhull/COPYING.txt + +Name: Cephes +Files: scipy/special/cephes/* +License: 3-clause BSD + Distributed under 3-clause BSD license with permission from the author, + see https://lists.debian.org/debian-legal/2004/12/msg00295.html + + Cephes Math Library Release 2.8: June, 2000 + Copyright 1984, 1995, 2000 by Stephen L. Moshier + + This software is derived from the Cephes Math Library and is + incorporated herein by permission of the author. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +Name: Faddeeva +Files: scipy/special/Faddeeva.* +License: MIT + Copyright (c) 2012 Massachusetts Institute of Technology + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Name: qd +Files: scipy/special/cephes/dd_*.[ch] +License: modified BSD license ("BSD-LBNL-License.doc") + This work was supported by the Director, Office of Science, Division + of Mathematical, Information, and Computational Sciences of the + U.S. Department of Energy under contract numbers DE-AC03-76SF00098 and + DE-AC02-05CH11231. + + Copyright (c) 2003-2009, The Regents of the University of California, + through Lawrence Berkeley National Laboratory (subject to receipt of + any required approvals from U.S. Dept. of Energy) All rights reserved. + + 1. Redistribution and use in source and binary forms, with or + without modification, are permitted provided that the following + conditions are met: + + (1) Redistributions of source code must retain the copyright + notice, this list of conditions and the following disclaimer. + + (2) Redistributions in binary form must reproduce the copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + (3) Neither the name of the University of California, Lawrence + Berkeley National Laboratory, U.S. Dept. of Energy nor the names + of its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + 2. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 3. 
You are under no obligation whatsoever to provide any bug fixes, + patches, or upgrades to the features, functionality or performance of + the source code ("Enhancements") to anyone; however, if you choose to + make your Enhancements available either publicly, or directly to + Lawrence Berkeley National Laboratory, without imposing a separate + written license agreement for such Enhancements, then you hereby grant + the following license: a non-exclusive, royalty-free perpetual license + to install, use, modify, prepare derivative works, incorporate into + other computer software, distribute, and sublicense such enhancements + or derivative works thereof, in binary and source code form. + + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +Copyright (C) 2008 Stefan van der Walt , Pauli Virtanen + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + + +Copyright (C) 2003-2005 Peter J. Verveer + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +3. The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+
+Copyright (c) 2002-2005, Jean-Sebastien Roy (js@jeannot.org)
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+ Qhull, Copyright (c) 1993-2015
+
+ C.B. Barber
+ Arlington, MA
+
+ and
+
+ The National Science and Technology Research Center for
+ Computation and Visualization of Geometric Structures
+ (The Geometry Center)
+ University of Minnesota
+
+ email: qhull@qhull.org
+
+This software includes Qhull from C.B. Barber and The Geometry Center.
+Qhull is copyrighted as noted above. Qhull is free software and may
+be obtained via http from www.qhull.org. It may be freely copied, modified,
+and redistributed under the following conditions:
+
+1. All copyright notices must remain intact in all files.
+
+2. A copy of this text file must be distributed along with any copies
+ of Qhull that you redistribute; this includes copies that you have
+ modified, or copies of programs or other software products that
+ include Qhull.
+
+3. If you modify Qhull, you must include a notice giving the
+ name of the person performing the modification, the date of
+ modification, and the reason for such modification.
+
+4. When distributing modified versions of Qhull, or other software
+ products that include Qhull, you must provide notice that the original
+ source code may be obtained as noted above.
+
+5. There is no warranty or other guarantee of fitness for Qhull, it is
+ provided solely "as is". Bug reports or fixes may be sent to
+ qhull_bug@qhull.org; the authors may or may not act on them as
+ they desire.
+
+
+-------------------------------------------------------------------
+
+-------------------------------------------------------------------
+
+sympy 1.4 - BSD-2-Clause
+Copyright (c) 2014 Matthew Rocklin
+copyright 2019 SymPy Development Team
+Copyright 2007-2013 by the Sphinx team
+copyright 2015, SymPy Development Team
+Copyright (c) 2006-2014 SymPy developers
+Copyright (c) 2001, 2002 Vasil Yaroshevich
+Copyright 2014 by the SymPy Development Team
+Copyright (c) 2006-2019 SymPy Development Team
+Copyright (c) 2008 The IPython Development Team
+Copyright (c) 2008 Jens Rasch
+Copyright (c) 2006-2017 SymPy Development Team, 2013-2017 Sergey B Kirpichev
+(c) Copyright 2000-2003 Symbolic Computation Laboratory, University of Western Ontario, London, Canada N6A
+
+Copyright (c) 2001, 2002 Enthought, Inc.
+All rights reserved.
+
+Copyright (c) 2003-2019 SciPy Developers.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ a. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ b. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ c. Neither the name of Enthought nor the names of the SciPy Developers
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+SciPy bundles a number of libraries that are compatibly licensed. We list
+these here.
+ +Name: Numpydoc +Files: doc/sphinxext/numpydoc/* +License: 2-clause BSD + For details, see doc/sphinxext/LICENSE.txt + +Name: scipy-sphinx-theme +Files: doc/scipy-sphinx-theme/* +License: 3-clause BSD, PSF and Apache 2.0 + For details, see doc/sphinxext/LICENSE.txt + +Name: Six +Files: scipy/_lib/six.py +License: MIT + For details, see the header inside scipy/_lib/six.py + +Name: Decorator +Files: scipy/_lib/decorator.py +License: 2-clause BSD + For details, see the header inside scipy/_lib/decorator.py + +Name: ID +Files: scipy/linalg/src/id_dist/* +License: 3-clause BSD + For details, see scipy/linalg/src/id_dist/doc/doc.tex + +Name: L-BFGS-B +Files: scipy/optimize/lbfgsb/* +License: BSD license + For details, see scipy/optimize/lbfgsb/README + +Name: SuperLU +Files: scipy/sparse/linalg/dsolve/SuperLU/* +License: 3-clause BSD + For details, see scipy/sparse/linalg/dsolve/SuperLU/License.txt + +Name: ARPACK +Files: scipy/sparse/linalg/eigen/arpack/ARPACK/* +License: 3-clause BSD + For details, see scipy/sparse/linalg/eigen/arpack/ARPACK/COPYING + +Name: Qhull +Files: scipy/spatial/qhull/* +License: Qhull license (BSD-like) + For details, see scipy/spatial/qhull/COPYING.txt + +Name: Cephes +Files: scipy/special/cephes/* +License: 3-clause BSD + Distributed under 3-clause BSD license with permission from the author, + see https://lists.debian.org/debian-legal/2004/12/msg00295.html + + Cephes Math Library Release 2.8: June, 2000 + Copyright 1984, 1995, 2000 by Stephen L. Moshier + + This software is derived from the Cephes Math Library and is + incorporated herein by permission of the author. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +Name: Faddeeva +Files: scipy/special/Faddeeva.* +License: MIT + Copyright (c) 2012 Massachusetts Institute of Technology + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Name: qd +Files: scipy/special/cephes/dd_*.[ch] +License: modified BSD license ("BSD-LBNL-License.doc") + This work was supported by the Director, Office of Science, Division + of Mathematical, Information, and Computational Sciences of the + U.S. Department of Energy under contract numbers DE-AC03-76SF00098 and + DE-AC02-05CH11231. + + Copyright (c) 2003-2009, The Regents of the University of California, + through Lawrence Berkeley National Laboratory (subject to receipt of + any required approvals from U.S. Dept. of Energy) All rights reserved. + + 1. Redistribution and use in source and binary forms, with or + without modification, are permitted provided that the following + conditions are met: + + (1) Redistributions of source code must retain the copyright + notice, this list of conditions and the following disclaimer. + + (2) Redistributions in binary form must reproduce the copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + (3) Neither the name of the University of California, Lawrence + Berkeley National Laboratory, U.S. Dept. of Energy nor the names + of its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + 2. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 3. 
You are under no obligation whatsoever to provide any bug fixes, + patches, or upgrades to the features, functionality or performance of + the source code ("Enhancements") to anyone; however, if you choose to + make your Enhancements available either publicly, or directly to + Lawrence Berkeley National Laboratory, without imposing a separate + written license agreement for such Enhancements, then you hereby grant + the following license: a non-exclusive, royalty-free perpetual license + to install, use, modify, prepare derivative works, incorporate into + other computer software, distribute, and sublicense such enhancements + or derivative works thereof, in binary and source code form. + + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +Copyright (C) 2008 Stefan van der Walt , Pauli Virtanen + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + + +Copyright (C) 2003-2005 Peter J. Verveer + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +3. The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + + +Copyright (c) 2002-2005, Jean-Sebastien Roy (js@jeannot.org) + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + + Qhull, Copyright (c) 1993-2015 + + C.B. Barber + Arlington, MA + + and + + The National Science and Technology Research Center for + Computation and Visualization of Geometric Structures + (The Geometry Center) + University of Minnesota + + email: qhull@qhull.org + +This software includes Qhull from C.B. Barber and The Geometry Center. +Qhull is copyrighted as noted above. Qhull is free software and may +be obtained via http from www.qhull.org. It may be freely copied, modified, +and redistributed under the following conditions: + +1. All copyright notices must remain intact in all files. + +2. A copy of this text file must be distributed along with any copies + of Qhull that you redistribute; this includes copies that you have + modified, or copies of programs or other software products that + include Qhull. + +3. If you modify Qhull, you must include a notice giving the + name of the person performing the modification, the date of + modification, and the reason for such modification. + +4. When distributing modified versions of Qhull, or other software + products that include Qhull, you must provide notice that the original + source code may be obtained as noted above. + +5. There is no warranty or other guarantee of fitness for Qhull, it is + provided solely "as is". Bug reports or fixes may be sent to + qhull_bug@qhull.org; the authors may or may not act on them as + they desire. + + +------------------------------------------------------------------- + +------------------------------------------------------------------- + +dask/dask 54019e9c05134585c9c40e4195206aa78e2ea61a - BSD-3-Clause +Copyright 2002 Gary Strangman. +Copyright 2002-2016 The SciPy Developers +Copyright (c) 2005-2015, NumPy Developers. +copyright u'2014-2018, Anaconda, Inc. and contributors +Copyright (c) 2014-2018, Anaconda, Inc. and contributors + +Copyright (c) . All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------- + +------------------------------------------------------------------- + +mpmath 1.1.0 - BSD-3-Clause + +Copyright (c) . All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------- + +------------------------------------------------------------------- + +numpy 1.17.0 - BSD-3-Clause + +Copyright (c) . All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------- + +------------------------------------------------------------------- + +pytorch/ignite 38a4f37de759e33bc08441bde99bcb50f3d81f55 - BSD-3-Clause +copyright 2018, Torch +Copyright (c) 2018, PyTorch team +Copyright (c) 2010-2017 Benjamin Peterson + +Copyright (c) . All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +------------------------------------------------------------------- + +------------------------------------------------------------------- + +hrnet/hrnet-semantic-segmentation 06142dc1c7026e256a7561c3e875b06622b5670f - MIT +Copyright (c) 2017 +Copyright (c) Microsoft +Copyright (c) 2019 Microsoft + +Copyright (c) 2010-2018 Benjamin Peterson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +------------------------------------------------------------------- + +------------------------------------------------------------------- + +olivesgatech/facies_classification_benchmark 12102683a1ae78f8fbc953823c35a43b151194b3 - MIT +Copyright (c) 2017 Meet Pragnesh Shah + +Copyright (c) 2010-2018 Benjamin Peterson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +------------------------------------------------------------------- + +------------------------------------------------------------------- + +opesci/devito f6129286d9c0b3a8bfe07e724ac5b00dc762efee - MIT +copyright u'2016-2019, Devito +Copyright (c) 2016, Imperial College, London + +Copyright (c) 2010-2018 Benjamin Peterson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +------------------------------------------------------------------- + +------------------------------------------------------------------- + +six 1.12.0 - MIT +copyright u'2010-2018, Benjamin Peterson +Copyright (c) 2010-2018 Benjamin Peterson + +Copyright (c) 2010-2018 Benjamin Peterson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +------------------------------------------------------------------- + +------------------------------------------------------------------- + +waldeland/cnn-for-asi 6f985cccecf9a811565d0b7cd919412569a22b7b - MIT +Copyright (c) 2017 + +Copyright (c) 2010-2018 Benjamin Peterson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +------------------------------------------------------------------- diff --git a/README.md b/README.md index 8cfec9e1..479e8e37 100644 --- a/README.md +++ b/README.md @@ -1,69 +1,402 @@ ---- -page_type: sample -languages: -- csharp -products: -- dotnet -description: "Add 150 character max description" -urlFragment: "update-this-to-unique-url-stub" ---- - -# DeepSeismic - -![Build Status](https://dev.azure.com/best-practices/deepseismic/_apis/build/status/microsoft.DeepSeismic?branchName=master) -[![Build Status](https://dev.azure.com/best-practices/deepseismic/_apis/build/status/microsoft.DeepSeismic?branchName=master)](https://dev.azure.com/best-practices/deepseismic/_build/latest?definitionId=108&branchName=master) - -# Official Microsoft Sample - - - -Give a short description for your sample here. What does it do and why is it important? - -## Contents - -Outline the file contents of the repository. It helps users navigate the codebase, build configuration and any related assets. - -| File/folder | Description | -|-------------------|--------------------------------------------| -| `src` | Sample source code. | -| `.gitignore` | Define what to ignore at commit time. | -| `CHANGELOG.md` | List of changes to the sample. | -| `CONTRIBUTING.md` | Guidelines for contributing to the sample. | -| `README.md` | This README file. | -| `LICENSE` | The license for the sample. | - -## Prerequisites - -Outline the required components and tools that a user might need to have on their machine in order to run the sample. This can be anything from frameworks, SDKs, OS versions or IDE releases. - -## Setup - -Explain how to prepare the sample once the user clones or downloads the repository. The section should outline every step necessary to install dependencies and set up any settings (for example, API keys and output folders). - -## Runnning the sample - -Outline step-by-step instructions to execute the sample and see its output. Include steps for executing the sample from the IDE, starting specific services in the Azure portal or anything related to the overall launch of the code. - -## Key concepts - -Provide users with more context on the tools and services used in the sample. 
Explain some of the code that is being used and how services interact with each other.
-
-## Contributing
-
-This project welcomes contributions and suggestions. Most contributions require you to agree to a
-Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
-the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
-
-When you submit a pull request, a CLA bot will automatically determine whether you need to provide
-a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
-provided by the bot. You will only need to do this once across all repos using our CLA.
-
-This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
-For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
-contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+# DeepSeismic
+![DeepSeismic](./assets/DeepSeismicLogo.jpg)
+
+This repository shows you how to perform seismic imaging and interpretation on Azure. It empowers geophysicists and data scientists to run seismic experiments using state-of-the-art DSL-based PDE solvers and segmentation algorithms on Azure.
+
+The repository provides sample notebooks, data loaders for seismic data, utilities, and out-of-the-box ML pipelines, organized as follows:
+- **sample notebooks**: these can be found in the `examples` folder - they are standard Jupyter notebooks which highlight how to use the codebase by walking the user through a set of pre-made examples
+- **experiments**: the goal is to provide runnable Python scripts which train and test (score) our machine learning models in the `experiments` folder. The models themselves are swappable, meaning a single train script can be used to run a different model on the same dataset by simply swapping out the configuration file which defines the model. Experiments are organized by model types and datasets - for example, "2D segmentation on Dutch F3 dataset", "2D segmentation on Penobscot dataset" and "3D segmentation on Penobscot dataset" are all different experiments. As another example, if one is swapping 2D segmentation models on the Dutch F3 dataset, one would just point the train and test scripts to a different configuration file within the same experiment.
+- **pip installable utilities**: we provide the `cv_lib` and `deepseismic_interpretation` utilities (more info below) which are used by both the sample notebooks and the experiments mentioned above
+
+DeepSeismic currently focuses on Seismic Interpretation (3D segmentation aka facies classification) with experimental code provided around Seismic Imaging.
+
+### Quick Start
+
+There are two ways to get started with the DeepSeismic codebase, which currently focuses on Interpretation:
+- if you'd like to get an idea of how our interpretation (segmentation) models are used, simply review the [HRNet demo notebook](https://github.com/microsoft/DeepSeismic/blob/master/examples/interpretation/notebooks/HRNet_Penobscot_demo_notebook.ipynb)
+- to actually run the code, you'll need to set up a compute environment (which includes setting up a GPU-enabled Linux VM and downloading the appropriate Anaconda Python packages) and download the datasets you'd like to work with - detailed steps for doing this are provided in the next `Interpretation` section below.
+
+If you run into any problems, chances are your problem has already been solved in the [Troubleshooting](#troubleshooting) section.
+
+### Pre-run notebooks
+
+Notebooks stored in the repository have their output intentionally removed - you can find full auto-generated versions of the notebooks here:
+- **HRNet Penobscot demo**: [[HTML](https://deepseismicstore.blob.core.windows.net/shared/HRNet_Penobscot_demo_notebook.html)] [[.ipynb](https://deepseismicstore.blob.core.windows.net/shared/HRNet_Penobscot_demo_notebook.ipynb)]
+- **Dutch F3 dataset**: [[HTML](https://deepseismicstore.blob.core.windows.net/shared/F3_block_training_and_evaluation_local.html)] [[.ipynb](https://deepseismicstore.blob.core.windows.net/shared/F3_block_training_and_evaluation_local.ipynb)]
+
+### Azure Machine Learning
+[Azure Machine Learning](https://docs.microsoft.com/en-us/azure/machine-learning/) enables you to train and deploy your machine learning models and pipelines at scale, and leverage open-source Python frameworks, such as PyTorch, TensorFlow, and scikit-learn. If you are looking to get started using the code in this repository with Azure Machine Learning, refer to the [Azure Machine Learning How-to](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml) guides.
+
+## Interpretation
+For seismic interpretation, the repository consists of extensible machine learning pipelines that show how you can leverage state-of-the-art segmentation algorithms (UNet, SEResNET, HRNet) for seismic interpretation, as well as benchmarking results from running these algorithms on various seismic datasets (Dutch F3 and Penobscot).
+
+To run the examples available in the repo, please follow the instructions below to:
+1) [Set up the environment](#setting-up-environment)
+2) [Download the data sets](#dataset-download-and-preparation)
+3) [Run example notebooks and scripts](#run-examples)
+
+### Setting up Environment
+
+Follow the instructions below to read about compute requirements and install the required libraries.
+
+
+#### Compute environment
+
+We recommend using a virtual machine to run the example notebooks and scripts. Specifically, you will need a GPU-powered Linux machine, as this repository is developed and tested on __Linux only__. The easiest way to get started is to use the [Azure Data Science Virtual Machine (DSVM) for Linux (Ubuntu)](https://docs.microsoft.com/en-us/azure/machine-learning/data-science-virtual-machine/dsvm-ubuntu-intro). This VM comes installed with all the system requirements needed to create the conda environment described below and then run the notebooks in this repository.
+
+For this repo, we recommend selecting a multi-GPU Ubuntu VM of type [Standard_NC12](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu#nc-series). The machine is powered by NVIDIA Tesla K80 GPUs (or V100 GPUs for the NCv2 series), which are available in most Azure regions.
+
+> NOTE: For users new to Azure, your subscription may not come with a quota for GPUs. You may need to go into the Azure portal to increase your quota for GPU VMs. Learn more about how to do this here: https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits.
+
+
+#### Package Installation
+
+To install the packages contained in this repository, navigate to the directory where you cloned the DeepSeismic repo and run:
+```bash
+conda env create -f environment/anaconda/local/environment.yml
+```
+This will create the appropriate conda environment to run experiments.
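+
+If you want to double-check that the environment was created before moving on, a quick optional sanity check (assuming `conda` is already on your `PATH`) is:
+```bash
+# list conda environments and look for seismic-interpretation
+conda env list | grep seismic-interpretation
+```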
+
+Next you will need to install the common package for interpretation:
+```bash
+conda activate seismic-interpretation
+pip install -e interpretation
+```
+
+Then you will also need to install `cv_lib`, which contains computer vision related utilities:
+```bash
+pip install -e cv_lib
+```
+
+Both packages are installed in development mode with the `-e` flag. This means that to update them, you can simply go to the repo folder and pull the appropriate commit or branch.
+
+During development, in case you need to update the environment due to a conda env file change, you can run
+```
+conda env update --file environment/anaconda/local/environment.yml
+```
+from the root of the DeepSeismic repo.
+
+
+### Dataset download and preparation
+
+This repository provides examples of how to run seismic interpretation on two publicly available annotated seismic datasets: [Penobscot](https://zenodo.org/record/1341774) and [F3 Netherlands](https://github.com/olivesgatech/facies_classification_benchmark). Their respective sizes (uncompressed on disk after downloading and pre-processing) are:
+- **Penobscot**: 7.9 GB
+- **Dutch F3**: 2.2 GB
+
+Please make sure you have enough disk space to download either dataset.
+
+We have experiments and notebooks which use either one dataset or the other. Depending on which experiment/notebook you want to run, you'll need to download the corresponding dataset. We suggest you start by looking at the [HRNet demo notebook](https://github.com/microsoft/DeepSeismic/blob/master/examples/interpretation/notebooks/HRNet_Penobscot_demo_notebook.ipynb), which requires the Penobscot dataset.
+
+#### Penobscot
+To download the Penobscot dataset, run the [download_penobscot.sh](scripts/download_penobscot.sh) script, e.g.
+
+```
+data_dir="$HOME/data/penobscot"
+mkdir -p "$data_dir"
+./scripts/download_penobscot.sh "$data_dir"
+```
+
+Note that the specified download location should be configured with appropriate `write` permissions. On some Linux virtual machines, you may want to place the data into the `/mnt` or `/data` folder, in which case you need to make sure you have write access there.
+
+To make things easier, we suggest you use your home directory, although you might run out of space there. If this happens on an [Azure Data Science Virtual Machine](https://azure.microsoft.com/en-us/services/virtual-machines/data-science-virtual-machines/) you can resize the disk quite easily from the [Azure Portal](https://portal.azure.com) - please see the [Troubleshooting](#troubleshooting) section at the end of this README regarding [how to do this](#how-to-resize-data-science-virtual-machine-disk).
+
+To prepare the data for the experiments (e.g. split into train/val/test), please run the following script (modifying arguments as desired):
+
+```
+python scripts/prepare_penobscot.py split_inline --data-dir="$HOME/data/penobscot" --val-ratio=.1 --test-ratio=.2
+```
+
+#### F3 Netherlands
+To download the F3 Netherlands dataset for 2D experiments, please follow the data download instructions at
+[this github repository](https://github.com/yalaudah/facies_classification_benchmark) (section Dataset).
+
+Once you've downloaded the data set, make sure to create an empty `splits` directory under the downloaded `data` directory; you can re-use the same data directory as the one created for the Penobscot dataset earlier. This is where your training/test/validation splits will be saved.
+
+```
+cd data
+mkdir splits
+```
+
+At this point, your `data` directory tree should look like this:
+
+```
+data
+├── splits
+├── test_once
+│   ├── test1_labels.npy
+│   ├── test1_seismic.npy
+│   ├── test2_labels.npy
+│   └── test2_seismic.npy
+└── train
+    ├── train_labels.npy
+    └── train_seismic.npy
+```
+
+To prepare the data for the experiments (e.g. split into train/val/test), please run the following script:
+
+```
+# For section-based experiments
+python scripts/prepare_dutchf3.py split_train_val section --data-dir=/mnt/dutchf3
+
+
+# For patch-based experiments
+python scripts/prepare_dutchf3.py split_train_val patch --data-dir=/mnt/dutchf3 --stride=50 --patch=100
+
+```
+
+Refer to the script itself for more argument options.
+
+### Run Examples
+
+#### Notebooks
+We provide example notebooks under `examples/interpretation/notebooks/` to demonstrate how to train seismic interpretation models and evaluate them on the Penobscot and F3 datasets.
+
+Make sure to run the notebooks in the conda environment we previously set up (`seismic-interpretation`). To register the conda environment in Jupyter, please run:
+
+```
+python -m ipykernel install --user --name seismic-interpretation
+```
+
+#### Experiments
+
+We also provide scripts for a number of experiments we conducted using different segmentation approaches. These experiments are available under `experiments/interpretation` and can be used as examples. Within each experiment, start from the `train.sh` and `test.sh` scripts under the `local/` (single GPU) and `distributed/` (multiple GPUs) directories, which invoke the corresponding Python scripts, `train.py` and `test.py`. Take a look at the experiment configurations (see the Configuration Files section below) for experiment options and modify them if necessary.
+
+Please refer to individual experiment README files for more information.
+- [Penobscot](experiments/interpretation/penobscot/README.md)
+- [F3 Netherlands Patch](experiments/interpretation/dutchf3_patch/README.md)
+- [F3 Netherlands Section](experiments/interpretation/dutchf3_section/README.md)
+
+#### Configuration Files
+We use the [YACS](https://github.com/rbgirshick/yacs) configuration library to manage configuration options for the experiments. There are three ways to pass arguments to the experiment scripts (e.g. train.py or test.py):
+
+- __default.py__ - A project config file `default.py` is a one-stop reference point for all configurable options, and provides sensible defaults for all arguments. If no arguments are passed to the `train.py` or `test.py` script (e.g. `python train.py`), the arguments are by default loaded from `default.py`. Please take a look at `default.py` to familiarize yourself with the arguments used by the script you run.
+
+- __yml config files__ - YAML configuration files under `configs/` are typically created one for each experiment. These are meant to be used for repeatable experiment runs and reproducible settings. Each configuration file only overrides the options that are changing in that experiment (e.g. options loaded from `default.py` during an experiment run will be overridden by arguments loaded from the yaml file). As an example, to use a yml configuration file with the training script, run:
+
+  ```
+  python train.py --cfg "configs/hrnet.yaml"
+  ```
+
+- __command line__ - Finally, options can be passed in through the `options` argument, and those will override arguments loaded from the configuration file.
We created CLIs for all our scripts (using the Python Fire library), so you can pass these options via command-line arguments, like so:
+
+  ```
+  python train.py DATASET.ROOT "/mnt/dutchf3" TRAIN.END_EPOCH 10
+  ```
+
+
+### Pretrained Models
+
+#### HRNet
+
+To achieve the same results as the benchmarks above, you will need to download the HRNet model [pretrained](https://github.com/HRNet/HRNet-Image-Classification) on ImageNet. We are specifically using the [HRNet-W48-C](https://1drv.ms/u/s!Aus8VCZ_C_33dKvqI6pBZlifgJk) pre-trained model; other HRNet variants are also available [here](https://github.com/HRNet/HRNet-Image-Classification) - you can navigate to those from the [main HRNet landing page](https://github.com/HRNet/HRNet-Object-Detection) for object detection.
+
+Unfortunately, the OneDrive location which is used to host the model uses a temporary authentication token, so there is no way for us to script the model download. There are two ways to upload and use the pre-trained HRNet model on the DS VM:
+- download the model to your local drive using a web browser of your choice and then upload the model to the DS VM using something like `scp`; navigate to the Azure Portal and copy the DS VM's public IP from the Overview panel of your DS VM (you can search for your DS VM by name in the search bar of the Portal), then use `scp local_model_location username@DS_VM_public_IP:./model/save/path` to upload
+- alternatively, you can use the same public IP to open a remote desktop session over SSH to your Linux VM using [X2Go](https://wiki.x2go.org/doku.php/download:start): you can basically open the web browser on your VM this way and download the model to the VM's disk
+
+
+### Viewers (optional)
+
+For seismic interpretation (segmentation), if you want to visualize cross-sections of a 3D volume (both the input velocity model and the segmented output), you can use
+[segyviewer](https://github.com/equinor/segyviewer). To install and use segyviewer, please follow the instructions below.
+
+#### segyviewer
+
+To install [segyviewer](https://github.com/equinor/segyviewer) run:
+```bash
+conda create -n segyviewer python=2.7
+conda activate segyviewer
+conda install -c anaconda pyqt=4.11.4
+pip install segyviewer
+```
+
+To visualize cross-sections of a 3D volume, you can run
+[segyviewer](https://github.com/equinor/segyviewer) like so:
+```bash
+segyviewer "${HOME}/data/dutchf3/data.segy"
+```
+
+### Benchmarks
+
+#### Dense Labels
+
+This section contains benchmarks of different algorithms for seismic interpretation on 3D seismic datasets with densely-annotated data.
+
+Below are the results from the models contained in this repo. To run them, check the instructions in the [experiments](experiments) folder. Alternatively, take a look at the example notebooks for how to run them on your own dataset.
+
+#### Netherlands F3
+
+| Source           | Experiment                        | PA    | FW IoU | MCA   |
+|------------------|-----------------------------------|-------|--------|-------|
+| Alaudah et al.   | Section-based                     | 0.905 | 0.817  | 0.832 |
+|                  | Patch-based                       | 0.852 | 0.743  | 0.689 |
+| DeepSeismic      | Patch-based+fixed                 | 0.869 | 0.761  | 0.775 |
+|                  | SEResNet UNet+section depth       | 0.917 | 0.849  | 0.834 |
+|                  | HRNet(patch)+patch_depth          | 0.908 | 0.843  | 0.837 |
+|                  | HRNet(patch)+section_depth        | 0.928 | 0.871  | 0.871 |
+
+#### Penobscot
+
+Trained and tested on the full dataset. Inlines with artefacts were left in for training, validation and testing.
+The dataset was split 70% training, 10% validation and 20% test.
The results below are from the test set.
+
+| Source           | Experiment                          | PA    | IoU   | MCA   |
+|------------------|-------------------------------------|-------|-------|-------|
+| DeepSeismic      | SEResNet UNet + section depth       | 1.0   | 0.98  | 0.99  |
+|                  | HRNet(patch) + section depth        | 1.0   | 0.97  | 0.98  |
+
+![Best Penobscot SEResNet](assets/penobscot_seresnet_best.png "Best performing inlines, Mask and Predictions from SEResNet")
+![Worst Penobscot SEResNet](assets/penobscot_seresnet_worst.png "Worst performing inlines, Mask and Predictions from SEResNet")
+
+#### Reproduce benchmarks
+In order to reproduce the benchmarks, you will need to navigate to the [experiments](experiments) folder, where the experiments are split into different folders. To run the Netherlands F3 experiment, navigate to the [dutchf3_patch/local](experiments/dutchf3_patch/local) folder. In there is a training script ([train.sh](experiments/dutchf3_patch/local/train.sh))
+which will run the training for any configuration you pass in. Once you have run the training, you will need to run the [test.sh](experiments/dutchf3_patch/local/test.sh) script. Make sure you specify
+the path to the best performing model from your training run, either by passing it in as an argument or by altering the YACS config file.
+
+To reproduce the benchmarks for the Penobscot dataset, follow the same instructions but navigate to the [penobscot](penobscot) folder.
+
+#### Scripts
+- [parallel_training.sh](scripts/parallel_training.sh): Script to launch multiple jobs in parallel. Used mainly for local hyperparameter tuning. Look at the script for further instructions.
+
+- [kill_windows.sh](scripts/kill_windows.sh): Script to kill multiple tmux windows. Used to kill jobs that parallel_training.sh might have started.
+
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
+
+### Submitting a Pull Request
+
+We try to keep the repo in a clean state, which means that we only enable read access to the repo - read access still enables one to submit a PR or an issue. To do so, fork the repo and submit a PR from a branch in your forked repo into our staging branch.
+
+When you submit a pull request, a CLA bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## Build Status
+| Build | Branch | Status |
+| --- | --- | --- |
+| **Legal Compliance** | staging | [![Build Status](https://dev.azure.com/best-practices/deepseismic/_apis/build/status/microsoft.ComponentGovernance%20(seismic-deeplearning)?branchName=staging)](https://dev.azure.com/best-practices/deepseismic/_build/latest?definitionId=124&branchName=staging) |
+| **Legal Compliance** | master | [![Build Status](https://dev.azure.com/best-practices/deepseismic/_apis/build/status/microsoft.ComponentGovernance%20(seismic-deeplearning)?branchName=master)](https://dev.azure.com/best-practices/deepseismic/_build/latest?definitionId=124&branchName=master) |
+| **Tests** | staging | [![Build Status](https://dev.azure.com/best-practices/deepseismic/_apis/build/status/microsoft.Notebooks%20(seismic-deeplearning)?branchName=staging)](https://dev.azure.com/best-practices/deepseismic/_build/latest?definitionId=125&branchName=staging) |
+| **Tests** | master | [![Build Status](https://dev.azure.com/best-practices/deepseismic/_apis/build/status/microsoft.Notebooks%20(seismic-deeplearning)?branchName=master)](https://dev.azure.com/best-practices/deepseismic/_build/latest?definitionId=125&branchName=master) |
+| **Notebook Tests** | staging | [![Build Status](https://dev.azure.com/best-practices/deepseismic/_apis/build/status/microsoft.Tests%20(seismic-deeplearning)?branchName=staging)](https://dev.azure.com/best-practices/deepseismic/_build/latest?definitionId=126&branchName=staging) |
+| **Notebook Tests** | master | [![Build Status](https://dev.azure.com/best-practices/deepseismic/_apis/build/status/microsoft.Tests%20(seismic-deeplearning)?branchName=master)](https://dev.azure.com/best-practices/deepseismic/_build/latest?definitionId=126&branchName=master) |
+
+
+## Troubleshooting
+
+For Data Science Virtual Machine conda package installation issues, first locate the Anaconda installation on the DSVM, for example by running:
+```bash
+which python
+```
+A typical output will be:
+```bash
+someusername@somevm:/projects/DeepSeismic$ which python
+/anaconda/envs/py35/bin/python
+```
+which indicates that the Anaconda folder is __/anaconda__. We'll refer to this location in the instructions below, but you should update the commands according to your local Anaconda folder.
+
+<details>
+  <summary><b>Data Science Virtual Machine conda package installation errors</b></summary>
+
+  It could happen that you don't have sufficient permissions to run conda commands / install packages in an Anaconda packages directory. To remedy the situation, please run the following commands:
+  ```bash
+  rm -rf /anaconda/pkgs/*
+  sudo chown -R $(whoami) /anaconda
+  ```
+
+  After these commands complete, try installing the packages again.
+
+</details>
+ +
+<details>
+  <summary><b>Data Science Virtual Machine conda package installation warnings</b></summary>
+
+  It could happen that while creating the conda environment defined by environment/anaconda/local/environment.yml on an Ubuntu DSVM, one gets multiple warnings like so:
+  ```
+  WARNING conda.gateways.disk.delete:unlink_or_rename_to_trash(140): Could not remove or rename /anaconda/pkgs/ipywidgets-7.5.1-py_0/site-packages/ipywidgets-7.5.1.dist-info/LICENSE. Please remove this file manually (you may need to reboot to free file handles)
+  ```
+
+  If this happens, similar to the instructions above, stop the conda environment creation (type ```Ctrl+C```) and then recursively change the ownership of the /anaconda directory from root to the current user, by running this command:
+
+  ```bash
+  sudo chown -R $USER /anaconda
+  ```
+
+  After this command completes, try creating the conda environment from __environment/anaconda/local/environment.yml__ again.
+
+</details>
+ +
+<details>
+  <summary><b>Model training or scoring is not using GPU</b></summary>
+
+  To see if the GPU is being used while your model is being trained or used for inference, run
+  ```bash
+  nvidia-smi
+  ```
+  and confirm that you see your Python process using the GPU.
+
+  If not, you may want to try reverting to an older version of CUDA for use with PyTorch. After the environment has been set up, activate it with `conda activate seismic-interpretation` and then run the following command (by default we use CUDA 10):
+  ```bash
+  conda install pytorch torchvision cudatoolkit=9.2 -c pytorch
+  ```
+
+  To test whether this setup worked, you can then open `ipython` and execute the following code:
+  ```python
+  import torch
+  torch.cuda.is_available()
+  ```
+
+  The output should say "True".
+
+  If the output is still "False", you may want to try setting your environment variable to specify the device manually - to test this, start a new `ipython` session and type:
+  ```python
+  import os
+  os.environ['CUDA_VISIBLE_DEVICES']='0'
+  import torch
+  torch.cuda.is_available()
+  ```
+
+  The output should say "True" this time. If it does, you can make the change permanent by adding
+  ```bash
+  export CUDA_VISIBLE_DEVICES=0
+  ```
+  to your `$HOME/.bashrc` file.
+
+</details>
+ +
+<details>
+  <summary><b>GPU out of memory errors</b></summary>
+
+  You should be able to see how much GPU memory your process is using by running
+  ```bash
+  nvidia-smi
+  ```
+  and seeing if this amount is close to the physical memory limit specified by the GPU manufacturer.
+
+  If you're getting close to the memory limit, you may want to lower the batch size in the model configuration file, specifically the `TRAIN.BATCH_SIZE_PER_GPU` and `VALIDATION.BATCH_SIZE_PER_GPU` settings.
+
+</details>
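+
+For reference, since the training scripts accept command-line overrides (see the Configuration Files section above), one way to try smaller batch sizes without editing any files is a sketch like the following - the values are illustrative, not tuned recommendations:
+```bash
+# halve the default batch sizes to reduce GPU memory pressure
+python train.py --cfg "configs/hrnet.yaml" TRAIN.BATCH_SIZE_PER_GPU 16 VALIDATION.BATCH_SIZE_PER_GPU 16
+```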
+ +
+<details>
+  <summary><b>How to resize Data Science Virtual Machine disk</b></summary>
+
+  1. Go to the [Azure Portal](https://portal.azure.com) and find your virtual machine by typing its name in the search bar at the very top of the page.
+
+  2. In the Overview panel on the left hand side, click the Stop button to stop the virtual machine.
+
+  3. Next, select Disks in the same panel on the left hand side.
+
+  4. Click the Name of the OS Disk - you'll be navigated to the Disk view. From this view, select Configuration on the left hand side, then increase Size in GB and hit the Save button.
+
+  5. Navigate back to the Virtual Machine view from Step 2 and click the Start button to start the virtual machine.
+
+</details>
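+
+If you prefer the command line over the Portal, the same resize can be sketched with the Azure CLI - the resource group, VM and disk names below are hypothetical placeholders, so substitute your own:
+```bash
+# stop and deallocate the VM so its OS disk can be resized
+az vm deallocate --resource-group myResourceGroup --name myDSVM
+# grow the OS disk (disks can be grown, not shrunk)
+az disk update --resource-group myResourceGroup --name myOsDisk --size-gb 200
+# start the VM back up
+az vm start --resource-group myResourceGroup --name myDSVM
+```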
+ + + + + diff --git a/WORKERS b/WORKERS new file mode 100644 index 00000000..633ed717 --- /dev/null +++ b/WORKERS @@ -0,0 +1,51 @@ +AUTO_RESUME: False +CUDNN: + BENCHMARK: True + DETERMINISTIC: False + ENABLED: True +DATASET: + CLASS_WEIGHTS: [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + NUM_CLASSES: 6 + ROOT: +GPUS: (0,) +LOG_CONFIG: logging.conf +LOG_DIR: +MODEL: + IN_CHANNELS: 1 + NAME: patch_deconvnet +OUTPUT_DIR: output +PIN_MEMORY: True +PRINT_FREQ: 20 +SEED: 42 +TEST: + CROSSLINE: True + INLINE: True + MODEL_PATH: + SPLIT: Both + TEST_STRIDE: 10 +TRAIN: + AUGMENTATION: True + AUGMENTATIONS: + PAD: + HEIGHT: 256 + WIDTH: 256 + RESIZE: + HEIGHT: 200 + WIDTH: 200 + BATCH_SIZE_PER_GPU: 32 + BEGIN_EPOCH: 0 + DEPTH: no + END_EPOCH: 484 + MAX_LR: 0.01 + MEAN: 0.0009997 + MIN_LR: 0.001 + MODEL_DIR: models + MOMENTUM: 0.9 + PATCH_SIZE: 99 + SNAPSHOTS: 5 + STD: 0.20977 + STRIDE: 50 + WEIGHT_DECAY: 0.0001 +VALIDATION: + BATCH_SIZE_PER_GPU: 32 +WORKERS: 4 diff --git a/assets/DeepSeismicLogo.jpg b/assets/DeepSeismicLogo.jpg new file mode 100644 index 00000000..6c68879d Binary files /dev/null and b/assets/DeepSeismicLogo.jpg differ diff --git a/assets/penobscot_seresnet_best.png b/assets/penobscot_seresnet_best.png new file mode 100644 index 00000000..4f8f3beb Binary files /dev/null and b/assets/penobscot_seresnet_best.png differ diff --git a/assets/penobscot_seresnet_worst.png b/assets/penobscot_seresnet_worst.png new file mode 100644 index 00000000..369ec4e3 Binary files /dev/null and b/assets/penobscot_seresnet_worst.png differ diff --git a/azure-pipelines.yml b/azure-pipelines.yml deleted file mode 100644 index aa912913..00000000 --- a/azure-pipelines.yml +++ /dev/null @@ -1,19 +0,0 @@ -# Starter pipeline -# Start with a minimal pipeline that you can customize to build and deploy your code. -# Add steps that build, run tests, deploy, and more: -# https://aka.ms/yaml - -trigger: -- master - -pool: - vmImage: 'ubuntu-latest' - -steps: -- script: echo Hello, world! - displayName: 'Run a one-line script' - -- script: | - echo Add other tasks to build, test, and deploy your project. 
- echo See https://aka.ms/yaml
-  displayName: 'Run a multi-line script'
diff --git a/bin/ds b/bin/ds
deleted file mode 100644
index 3bd01081..00000000
--- a/bin/ds
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env python
-
-from deepseismic import cli
-
-if __name__ == "__main__":
-    cli.main()
diff --git a/cgmanifest.json b/cgmanifest.json
new file mode 100644
index 00000000..d647c543
--- /dev/null
+++ b/cgmanifest.json
@@ -0,0 +1,64 @@
+{"Registrations":[
+    {
+        "component": {
+            "type": "git",
+            "git": {
+                "repositoryUrl": "https://github.com/olivesgatech/facies_classification_benchmark",
+                "commitHash": "12102683a1ae78f8fbc953823c35a43b151194b3"
+            }
+        },
+        "license": "MIT"
+    },
+    {
+        "component": {
+            "type": "git",
+            "git": {
+                "repositoryUrl": "https://github.com/waldeland/CNN-for-ASI",
+                "commitHash": "6f985cccecf9a811565d0b7cd919412569a22b7b"
+            }
+        },
+        "license": "MIT"
+    },
+    {
+        "component": {
+            "type": "git",
+            "git": {
+                "repositoryUrl": "https://github.com/opesci/devito",
+                "commitHash": "f6129286d9c0b3a8bfe07e724ac5b00dc762efee"
+            }
+        },
+        "license": "MIT"
+    },
+    {
+        "component": {
+            "type": "git",
+            "git": {
+                "repositoryUrl": "https://github.com/pytorch/ignite",
+                "commitHash": "38a4f37de759e33bc08441bde99bcb50f3d81f55"
+            }
+        },
+        "license": "BSD-3-Clause"
+    },
+    {
+        "component": {
+            "type": "git",
+            "git": {
+                "repositoryUrl": "https://github.com/HRNet/HRNet-Semantic-Segmentation",
+                "commitHash": "06142dc1c7026e256a7561c3e875b06622b5670f"
+            }
+        },
+        "license": "MIT"
+    },
+    {
+        "component": {
+            "type": "git",
+            "git": {
+                "repositoryUrl": "https://github.com/dask/dask",
+                "commitHash": "54019e9c05134585c9c40e4195206aa78e2ea61a"
+            }
+        },
+        "license": "IPL-1.0"
+    }
+    ],
+    "Version": 1
+}
\ No newline at end of file
diff --git a/contrib/README.md b/contrib/README.md
new file mode 100644
index 00000000..a286b0f3
--- /dev/null
+++ b/contrib/README.md
@@ -0,0 +1,8 @@
+### Contrib folder
+
+Code in this folder has not been tested and is meant for exploratory work only.
+
+We encourage submissions to the contrib folder; once your contribution is well tested, please submit a pull request and work with the repository owners to graduate it into the main DeepSeismic repository.
+
+Thank you.
+
diff --git a/contrib/benchmarks/README.md b/contrib/benchmarks/README.md
new file mode 100644
index 00000000..14a86937
--- /dev/null
+++ b/contrib/benchmarks/README.md
@@ -0,0 +1,6 @@
+# Benchmarks
+
+In this folder we show benchmarks using different algorithms. To facilitate the benchmark computation, we provide a set of wrapper functions that can be found in the file [benchmark_utils.py](benchmark_utils.py).
+
+TODO
+
diff --git a/contrib/benchmarks/benchmark_utils.py b/contrib/benchmarks/benchmark_utils.py
new file mode 100644
index 00000000..e69de29b
diff --git a/contrib/experiments/interpretation/dutchf3_voxel/README.md b/contrib/experiments/interpretation/dutchf3_voxel/README.md
new file mode 100644
index 00000000..f794fb37
--- /dev/null
+++ b/contrib/experiments/interpretation/dutchf3_voxel/README.md
@@ -0,0 +1,17 @@
+First, make sure that the `${HOME}/data/dutch_f3` folder exists and that you have write access.
+
+Next, to get the main input dataset, which is the [Dutch F3 dataset](https://terranubis.com/datainfo/Netherlands-Offshore-F3-Block-Complete),
+navigate to the [MalenoV](https://github.com/bolgebrygg/MalenoV) project website and follow the links (which will lead to
+[this](https://drive.google.com/drive/folders/0B7brcf-eGK8CbGhBdmZoUnhiTWs) download).
Save this file as +`${HOME}/data/dutch_f3/data.segy` + +To download the train and validation masks, from the root of the repo, run +```bash +./contrib/scripts/get_F3_voxel.sh ${HOME}/data/dutch_f3 +``` + +This will also download train and validation masks to the same location as data.segy. + +That's it! + +To run the training script, run `python train.py --cfg=configs/texture_net.yaml`. diff --git a/contrib/experiments/interpretation/dutchf3_voxel/configs/texture_net.yaml b/contrib/experiments/interpretation/dutchf3_voxel/configs/texture_net.yaml new file mode 100644 index 00000000..aeeffb86 --- /dev/null +++ b/contrib/experiments/interpretation/dutchf3_voxel/configs/texture_net.yaml @@ -0,0 +1,41 @@ +# TextureNet configuration + +CUDNN: + BENCHMARK: true + DETERMINISTIC: false + ENABLED: true +GPUS: (0,) +OUTPUT_DIR: 'output' +LOG_DIR: 'log' +WORKERS: 4 +PRINT_FREQ: 10 +LOG_CONFIG: logging.conf +SEED: 2019 +WINDOW_SIZE: 65 + +DATASET: + NUM_CLASSES: 2 + ROOT: /home/maxkaz/data/dutchf3 + FILENAME: data.segy + +MODEL: + NAME: texture_net + IN_CHANNELS: 1 + NUM_FILTERS: 50 + +TRAIN: + BATCH_SIZE_PER_GPU: 32 + END_EPOCH: 5000 + LR: 0.02 + MOMENTUM: 0.9 + WEIGHT_DECAY: 0.0001 + DEPTH: "voxel" # Options are No, Patch, Section and Voxel + MODEL_DIR: "models" + +VALIDATION: + BATCH_SIZE_PER_GPU: 32 + +TEST: + MODEL_PATH: "" + SPLIT: 'Both' # Can be Both, Test1, Test2 + diff --git a/contrib/experiments/interpretation/dutchf3_voxel/default.py b/contrib/experiments/interpretation/dutchf3_voxel/default.py new file mode 100644 index 00000000..100da598 --- /dev/null +++ b/contrib/experiments/interpretation/dutchf3_voxel/default.py @@ -0,0 +1,82 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
+# ------------------------------------------------------------------------------
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from yacs.config import CfgNode as CN
+
+_C = CN()
+
+# Cudnn related params
+_C.CUDNN = CN()
+_C.CUDNN.BENCHMARK = True
+_C.CUDNN.DETERMINISTIC = False
+_C.CUDNN.ENABLED = True
+
+_C.GPUS = (0,)
+_C.OUTPUT_DIR = "output"  # This will be the base directory for all output, such as logs and saved models
+_C.LOG_DIR = ""  # This will be a subdirectory inside OUTPUT_DIR
+_C.WORKERS = 4
+_C.PRINT_FREQ = 20
+_C.LOG_CONFIG = "logging.conf"
+_C.SEED = 42
+# size of voxel cube: WINDOW_SIZE x WINDOW_SIZE x WINDOW_SIZE; used for 3D models only
+_C.WINDOW_SIZE = 65
+
+# DATASET related params
+_C.DATASET = CN()
+_C.DATASET.NUM_CLASSES = 2
+_C.DATASET.ROOT = ""
+_C.DATASET.FILENAME = "data.segy"
+
+# common params for NETWORK
+_C.MODEL = CN()
+_C.MODEL.NAME = "texture_net"
+_C.MODEL.IN_CHANNELS = 1
+_C.MODEL.NUM_FILTERS = 50
+_C.MODEL.EXTRA = CN(new_allowed=True)
+
+# training
+_C.TRAIN = CN()
+_C.TRAIN.BATCH_SIZE_PER_GPU = 32
+# number of batches per epoch
+_C.TRAIN.BATCH_PER_EPOCH = 10
+# total number of epochs
+_C.TRAIN.END_EPOCH = 200
+_C.TRAIN.LR = 0.01
+_C.TRAIN.MOMENTUM = 0.9
+_C.TRAIN.WEIGHT_DECAY = 0.0001
+_C.TRAIN.DEPTH = "voxel"  # Options are none, patch, section and voxel
+_C.TRAIN.MODEL_DIR = "models"  # This will be a subdirectory inside OUTPUT_DIR
+
+# validation
+_C.VALIDATION = CN()
+_C.VALIDATION.BATCH_SIZE_PER_GPU = 32
+
+# TEST
+_C.TEST = CN()
+_C.TEST.MODEL_PATH = ""
+_C.TEST.SPLIT = "Both"  # Can be Both, Test1, Test2
+
+
+def update_config(cfg, options=None, config_file=None):
+    cfg.defrost()
+
+    if config_file:
+        cfg.merge_from_file(config_file)
+
+    if options:
+        cfg.merge_from_list(options)
+
+    cfg.freeze()
+
+
+if __name__ == "__main__":
+    import sys
+
+    with open(sys.argv[1], "w") as f:
+        print(_C, file=f)
diff --git a/contrib/experiments/interpretation/dutchf3_voxel/logging.conf b/contrib/experiments/interpretation/dutchf3_voxel/logging.conf
new file mode 100644
index 00000000..56334fc4
--- /dev/null
+++ b/contrib/experiments/interpretation/dutchf3_voxel/logging.conf
@@ -0,0 +1,34 @@
+[loggers]
+keys=root,__main__,event_handlers
+
+[handlers]
+keys=consoleHandler
+
+[formatters]
+keys=simpleFormatter
+
+[logger_root]
+level=INFO
+handlers=consoleHandler
+
+[logger___main__]
+level=INFO
+handlers=consoleHandler
+qualname=__main__
+propagate=0
+
+[logger_event_handlers]
+level=INFO
+handlers=consoleHandler
+qualname=event_handlers
+propagate=0
+
+[handler_consoleHandler]
+class=StreamHandler
+level=INFO
+formatter=simpleFormatter
+args=(sys.stdout,)
+
+[formatter_simpleFormatter]
+format=%(asctime)s - %(name)s - %(levelname)s - %(message)s
+
diff --git a/contrib/experiments/interpretation/dutchf3_voxel/train.py b/contrib/experiments/interpretation/dutchf3_voxel/train.py
new file mode 100644
index 00000000..bd8cdf4b
--- /dev/null
+++ b/contrib/experiments/interpretation/dutchf3_voxel/train.py
@@ -0,0 +1,230 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+# /* spell-checker: disable */
+
+import logging
+import logging.config
+from os import path
+
+import fire
+import numpy as np
+import torch
+from torch.utils import data
+from ignite.engine import Events
+from ignite.handlers import ModelCheckpoint
+from ignite.metrics import Loss
+from ignite.utils import convert_tensor
+from tqdm import tqdm
+
+from deepseismic_interpretation.dutchf3.data import get_voxel_loader
+from deepseismic_interpretation.models.texture_net import TextureNet
+
+from cv_lib.utils import load_log_configuration
+from cv_lib.event_handlers import (
+    SnapshotHandler,
+    logging_handlers,
+    tensorboard_handlers,
+)
+from cv_lib.event_handlers.logging_handlers import Evaluator
+from cv_lib.event_handlers.tensorboard_handlers import create_summary_writer
+
+from cv_lib.segmentation.metrics import (
+    pixelwise_accuracy,
+    class_accuracy,
+    mean_class_accuracy,
+    class_iou,
+    mean_iou,
+)
+from cv_lib.segmentation import extract_metric_from
+
+# from cv_lib.segmentation.dutchf3.engine import (
+#     create_supervised_evaluator,
+#     create_supervised_trainer,
+# )
+# Use ignite generic versions for now
+from ignite.engine import create_supervised_trainer, create_supervised_evaluator
+
+from default import _C as config
+from default import update_config
+
+
+def _prepare_batch(batch, device=None, non_blocking=False, t_type=torch.FloatTensor):
+    x, y = batch
+    new_x = convert_tensor(torch.squeeze(x, 1), device=device, non_blocking=non_blocking)
+    new_y = convert_tensor(torch.unsqueeze(y, 2), device=device, non_blocking=non_blocking)
+    if device == "cuda":
+        return (
+            new_x.type(t_type).cuda(),
+            torch.unsqueeze(new_y, 3).type(torch.LongTensor).cuda(),
+        )
+    else:
+        return new_x.type(t_type), torch.unsqueeze(new_y, 3).type(torch.LongTensor)
+
+
+def run(*options, cfg=None):
+    """Run training and validation of model
+
+    Notes:
+        Options can be passed in via the options argument and loaded from the cfg file
+        Options from default.py will be overridden by options loaded from cfg file
+        Options passed in via options argument will override options loaded from cfg file
+
+    Args:
+        *options (str, int, optional): Options used to override what is loaded from the
+                                       config. To see what options are available consult
+                                       default.py
+        cfg (str, optional): Location of config file to load. Defaults to None.
+ """ + + update_config(config, options=options, config_file=cfg) + + # Start logging + load_log_configuration(config.LOG_CONFIG) + logger = logging.getLogger(__name__) + logger.debug(config.WORKERS) + torch.backends.cudnn.benchmark = config.CUDNN.BENCHMARK + + torch.manual_seed(config.SEED) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(config.SEED) + np.random.seed(seed=config.SEED) + + # load the data + TrainVoxelLoader = get_voxel_loader(config) + + train_set = TrainVoxelLoader( + config.DATASET.ROOT, + config.DATASET.FILENAME, + split="train", + window_size=config.WINDOW_SIZE, + len=config.TRAIN.BATCH_SIZE_PER_GPU * config.TRAIN.BATCH_PER_EPOCH, + batch_size=config.TRAIN.BATCH_SIZE_PER_GPU, + ) + val_set = TrainVoxelLoader( + config.DATASET.ROOT, + config.DATASET.FILENAME, + split="val", + window_size=config.WINDOW_SIZE, + len=config.TRAIN.BATCH_SIZE_PER_GPU * config.TRAIN.BATCH_PER_EPOCH, + batch_size=config.TRAIN.BATCH_SIZE_PER_GPU, + ) + + n_classes = train_set.n_classes + + # set dataset length to batch size to be consistent with 5000 iterations + # each of size 32 in the original Waldeland implementation + train_loader = data.DataLoader( + train_set, batch_size=config.TRAIN.BATCH_SIZE_PER_GPU, num_workers=config.WORKERS, shuffle=False, + ) + val_loader = data.DataLoader( + val_set, batch_size=config.VALIDATION.BATCH_SIZE_PER_GPU, num_workers=config.WORKERS, shuffle=False, + ) + + # this is how we import model for CV - here we're importing a seismic + # segmentation model + model = TextureNet(n_classes=config.DATASET.NUM_CLASSES) + + optimizer = torch.optim.Adam( + model.parameters(), + lr=config.TRAIN.LR, + # momentum=config.TRAIN.MOMENTUM, + weight_decay=config.TRAIN.WEIGHT_DECAY, + ) + + device = "cpu" + + if torch.cuda.is_available(): + device = "cuda" + model = model.cuda() + + loss = torch.nn.CrossEntropyLoss() + + trainer = create_supervised_trainer(model, optimizer, loss, prepare_batch=_prepare_batch, device=device) + + desc = "ITERATION - loss: {:.2f}" + pbar = tqdm(initial=0, leave=False, total=len(train_loader), desc=desc.format(0)) + + # add model checkpointing + output_dir = path.join(config.OUTPUT_DIR, config.TRAIN.MODEL_DIR) + checkpoint_handler = ModelCheckpoint( + output_dir, "model", save_interval=1, n_saved=3, create_dir=True, require_empty=False, + ) + + criterion = torch.nn.CrossEntropyLoss(reduction="mean") + + # save model at each epoch + trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {config.MODEL.NAME: model}) + + def _select_pred_and_mask(model_out): + # receive a tuple of (x, y_pred), y + # so actually in line 51 of + # cv_lib/cv_lib/segmentation/dutch_f3/metrics/__init__.py + # we do the following line, so here we just select the model + # _, y_pred = torch.max(model_out[0].squeeze(), 1, keepdim=True) + y_pred = model_out[0].squeeze() + y = model_out[1].squeeze() + return (y_pred.squeeze(), y) + + evaluator = create_supervised_evaluator( + model, + metrics={ + "nll": Loss(criterion, device=device), + "pixa": pixelwise_accuracy(n_classes, output_transform=_select_pred_and_mask, device=device), + "cacc": class_accuracy(n_classes, output_transform=_select_pred_and_mask, device=device), + "mca": mean_class_accuracy(n_classes, output_transform=_select_pred_and_mask, device=device), + "ciou": class_iou(n_classes, output_transform=_select_pred_and_mask, device=device), + "mIoU": mean_iou(n_classes, output_transform=_select_pred_and_mask, device=device), + }, + device=device, + prepare_batch=_prepare_batch, + ) + + # Set the 
validation run to start on the epoch completion of the training run
+    trainer.add_event_handler(Events.EPOCH_COMPLETED, Evaluator(evaluator, val_loader))
+
+    summary_writer = create_summary_writer(log_dir=path.join(output_dir, config.LOG_DIR))
+
+    evaluator.add_event_handler(
+        Events.EPOCH_COMPLETED,
+        logging_handlers.log_metrics(
+            "Validation results",
+            metrics_dict={
+                "mIoU": "Avg IoU :",
+                "nll": "Avg loss :",
+                "pixa": "Pixelwise Accuracy :",
+                "mca": "Mean Class Accuracy :",
+            },
+        ),
+    )
+    evaluator.add_event_handler(
+        Events.EPOCH_COMPLETED,
+        tensorboard_handlers.log_metrics(
+            summary_writer,
+            trainer,
+            "epoch",
+            metrics_dict={"mIoU": "Validation/IoU", "nll": "Validation/Loss", "mca": "Validation/MCA",},
+        ),
+    )
+
+    snapshot_duration = 1
+
+    def snapshot_function():
+        return (trainer.state.iteration % snapshot_duration) == 0
+
+    checkpoint_handler = SnapshotHandler(
+        path.join(output_dir, config.TRAIN.MODEL_DIR),
+        config.MODEL.NAME,
+        extract_metric_from("mIoU"),
+        snapshot_function,
+    )
+    evaluator.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {"model": model})
+
+    logger.info("Starting training")
+    trainer.run(train_loader, max_epochs=config.TRAIN.END_EPOCH // config.TRAIN.BATCH_PER_EPOCH)
+    pbar.close()
+
+
+if __name__ == "__main__":
+    fire.Fire(run)
diff --git a/contrib/experiments/interpretation/voxel2pixel/README.md b/contrib/experiments/interpretation/voxel2pixel/README.md
new file mode 100644
index 00000000..8e1f743c
--- /dev/null
+++ b/contrib/experiments/interpretation/voxel2pixel/README.md
@@ -0,0 +1,54 @@
+# Voxel to Pixel approach to Seismic Interpretation
+
+The code used in this approach is described in detail in the paper
+**Convolutional Neural Networks for Automated Seismic Interpretation**,
+A. U. Waldeland, A. C. Jensen, L. Gelius and A. H. S. Solberg
+[*The Leading Edge, July 2018*](https://library.seg.org/doi/abs/10.1190/tle37070529.1)
+
+There is also an
+EAGE E-lecture which you can watch: [*Seismic interpretation with deep learning*](https://www.youtube.com/watch?v=lm85Ap4OstM) (YouTube)
+
+### Setup to get started
+- make sure you follow the `README.md` file in the root of the repo to install all the proper dependencies.
+- downgrade TensorFlow and PyTorch's CUDA version:
+    - downgrade TensorFlow by running `pip install tensorflow-gpu==1.14`
+    - make sure PyTorch uses the downgraded CUDA: `pip install torch==1.3.1+cu92 torchvision==0.4.2+cu92 -f https://download.pytorch.org/whl/torch_stable.html`
+- download the data by running `contrib/scripts/get_F3_voxel.sh` from the `contrib` folder of this repo.
+This will download the training and validation labels/masks.
+- to get the main input dataset, which is the [Dutch F3 dataset](https://terranubis.com/datainfo/Netherlands-Offshore-F3-Block-Complete),
+navigate to the [MalenoV](https://github.com/bolgebrygg/MalenoV) project website and follow the links (which will lead to
+[this](https://drive.google.com/drive/folders/0B7brcf-eGK8CbGhBdmZoUnhiTWs) download). Save this file as
+`interpretation/voxel2pixel/F3/data.segy`
+
+If you want to revert the downgraded packages, just run `conda env update -f environment/anaconda/local/environment.yml` from the root folder of the repo.
+
+### Monitoring progress with TensorBoard
+- from the `voxel2pixel` directory, run `tensorboard --logdir='log'` (all runtime logging information is
+written to the `log` folder)
+- open a web browser and go to `localhost:6006`
+More information can be found [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard#launching_tensorboard). + +### Usage +- `python train.py` will train the CNN and produce a model after a few hours on a decent gaming GPU +with at least 6GB of onboard memory
+- `python test_parallel.py` - Example of how the trained CNN can be applied to predict salt in a slice or
+the full cube in a distributed fashion on a single multi-GPU machine (single-GPU mode is also supported).
+In addition, it shows how learned attributes can be extracted.
+
+### Files
+In addition, it may be useful to have a look at these files:
+- texture_net.py - this is where the network is defined
+- batch.py - provides functionality to generate training batches with random augmentation
+- data.py - load/save data sets with segy-format and labeled slices as images
+- tb_logger.py - connects to the tensorboard functionality
+- utils.py - some help functions
+- test_parallel.py - multi-GPU prediction script for scoring
+
+### Using a different data set and custom training labels
+If you want to use a different data set, do the following:
+- Make a new folder where you place the segy-file
+- Make a folder for the training labels
+- Save images of the slices you want to train on as 'SLICETYPE_SLICENO.png' (or jpg), where SLICETYPE is either 'inline', 'crossline', or 'timeslice' and SLICENO is the slice number.
+- Draw the classes on top of the seismic data, using a simple image editing program with the class colors. Currently up to six classes are supported, indicated by the colors: red, blue, green, cyan, magenta and yellow.
+
diff --git a/contrib/experiments/interpretation/voxel2pixel/batch.py b/contrib/experiments/interpretation/voxel2pixel/batch.py
new file mode 100644
index 00000000..d53f79e8
--- /dev/null
+++ b/contrib/experiments/interpretation/voxel2pixel/batch.py
@@ -0,0 +1,351 @@
+# Copyright (c) Microsoft. All rights reserved.
+# Licensed under the MIT license.
+
+# code modified from https://github.com/waldeland/CNN-for-ASI
+
+import numpy as np
+
+
+def get_random_batch(
+    data_cube,
+    label_coordinates,
+    im_size,
+    num_batch_size,
+    random_flip=False,
+    random_stretch=None,
+    random_rot_xy=None,
+    random_rot_z=None,
+):
+    """
+    Returns a batch of augmented samples with center pixels randomly drawn from label_coordinates
+
+    Args:
+        data_cube: 3D numpy array with floating point velocity values
+        label_coordinates: 3D coordinates of the labeled training slice
+        im_size: size of the 3D voxel which we're cutting out around each label_coordinate
+        num_batch_size: size of the batch
+        random_flip: bool to perform random voxel flip
+        random_stretch: maximum random stretch magnitude (None disables stretching)
+        random_rot_xy: maximum random rotation, in degrees, of the voxel around dim-0 and dim-1 (None disables it)
+        random_rot_z: maximum random rotation, in degrees, around dim-2 (None disables it)
+
+    Returns:
+        a tuple of a batch numpy array of data with dimensions
+        (batch, 1, im_size[0], im_size[1], im_size[2]) and the associated labels as an array
+        of size (batch).
+    """
+
+    # Make 3 im_size elements
+    if isinstance(im_size, int):
+        im_size = [im_size, im_size, im_size]
+
+    # Output arrays
+    batch = np.zeros([num_batch_size, 1, im_size[0], im_size[1], im_size[2]])
+    ret_labels = np.zeros([num_batch_size])
+
+    class_keys = list(label_coordinates)
+    n_classes = len(class_keys)
+
+    # Loop through batch
+    n_for_class = 0
+    class_ind = 0
+    for i in range(num_batch_size):
+
+        # Start by getting a grid centered around (0,0,0)
+        grid = get_grid(im_size)
+
+        # Apply random flip
+        if random_flip:
+            grid = augment_flip(grid)
+
+        # Apply random rotations
+        if random_rot_xy:
+            grid = augment_rot_xy(grid, random_rot_xy)
+        if random_rot_z:
+            grid = augment_rot_z(grid, random_rot_z)
+
+        # Apply random stretch
+        if random_stretch:
+            grid = augment_stretch(grid, random_stretch)
+
+        # Pick random location from the label_coordinates for this class:
+        coords_for_class = label_coordinates[class_keys[class_ind]]
+        random_index = rand_int(0, coords_for_class.shape[1])
+        coord = coords_for_class[:, random_index : random_index + 1]
+
+        # Move grid to be centered around this location
+        grid += coord
+
+        # Interpolate samples at grid from the data:
+        sample = trilinear_interpolation(data_cube, grid)
+
+        # Insert in output arrays
+        ret_labels[i] = class_ind
+        batch[i, 0, :, :, :] = np.reshape(sample, (im_size[0], im_size[1], im_size[2]))
+
+        # We seek to have a balanced batch with equally many samples from each class.
+        n_for_class += 1
+        if n_for_class + 1 > int(0.5 + num_batch_size / float(n_classes)):
+            if class_ind < n_classes - 1:
+                class_ind += 1
+                n_for_class = 0
+
+    return batch, ret_labels
+
+
+def get_grid(im_size):
+    """
+    get_grid returns z, x, y coordinates centered around (0, 0, 0)
+
+    Args:
+        im_size: size of window
+
+    Returns:
+        numpy array of shape (3, im_size[0] * im_size[1] * im_size[2])
+    """
+    win0 = np.linspace(-im_size[0] // 2, im_size[0] // 2, im_size[0])
+    win1 = np.linspace(-im_size[1] // 2, im_size[1] // 2, im_size[1])
+    win2 = np.linspace(-im_size[2] // 2, im_size[2] // 2, im_size[2])
+
+    x0, x1, x2 = np.meshgrid(win0, win1, win2, indexing="ij")
+
+    ex0 = np.expand_dims(x0.ravel(), 0)
+    ex1 = np.expand_dims(x1.ravel(), 0)
+    ex2 = np.expand_dims(x2.ravel(), 0)
+
+    grid = np.concatenate((ex0, ex1, ex2), axis=0)
+
+    return grid
+
+
+def augment_flip(grid):
+    """
+    Random flip of non-depth axes.
+
+    Args:
+        grid: 3D coordinates of the voxel
+
+    Returns:
+        flipped grid coordinates
+    """
+
+    # Flip x axis
+    if rand_bool():
+        grid[1, :] = -grid[1, :]
+
+    # Flip y axis
+    if rand_bool():
+        grid[2, :] = -grid[2, :]
+
+    return grid
+
+
+def augment_stretch(grid, stretch_factor):
+    """
+    Random stretch/scale
+
+    Args:
+        grid: 3D coordinate grid of the voxel
+        stretch_factor: maximum relative stretch; the applied stretch is drawn uniformly from (-stretch_factor, stretch_factor)
+
+    Returns:
+        stretched grid coordinates
+    """
+    stretch = rand_float(-stretch_factor, stretch_factor)
+    grid *= 1 + stretch
+    return grid
+
+
+def augment_rot_xy(grid, random_rot_xy):
+    """
+    Random rotation
+
+    Args:
+        grid: coordinate grid list of 3D points
+        random_rot_xy: maximum rotation angle in degrees; the applied angle is drawn uniformly from (-random_rot_xy, random_rot_xy)
+
+    Returns:
+        randomly rotated grid
+    """
+    theta = np.deg2rad(rand_float(-random_rot_xy, random_rot_xy))
+    x = grid[2, :] * np.cos(theta) - grid[1, :] * np.sin(theta)
+    y = grid[2, :] * np.sin(theta) + grid[1, :] * np.cos(theta)
+    grid[1, :] = x
+    grid[2, :] = y
+    return grid
+
+
+def augment_rot_z(grid, random_rot_z):
+    """
+    Random tilt around z-axis (dim-2)
+
+    Args:
+        grid: coordinate grid list of 3D points
+        random_rot_z: maximum tilt angle in degrees; the applied angle is drawn uniformly from (-random_rot_z, random_rot_z)
+
+    Returns:
+        randomly tilted coordinate grid
+    """
+    theta = np.deg2rad(rand_float(-random_rot_z, random_rot_z))
+    z = grid[0, :] * np.cos(theta) - grid[1, :] * np.sin(theta)
+    x = grid[0, :] * np.sin(theta) + grid[1, :] * np.cos(theta)
+    grid[0, :] = z
+    grid[1, :] = x
+    return grid
+
+
+def trilinear_interpolation(input_array, indices):
+    """
+    Trilinear interpolation; code adapted from
+    http://stackoverflow.com/questions/6427276/3d-interpolation-of-numpy-arrays-without-scipy
+
+    Args:
+        input_array: 3D data array
+        indices: 3D grid coordinates
+
+    Returns:
+        interpolated input array
+    """
+
+    x_indices, y_indices, z_indices = indices[0:3]
+
+    n0, n1, n2 = input_array.shape
+
+    x0 = x_indices.astype(np.integer)
+    y0 = y_indices.astype(np.integer)
+    z0 = z_indices.astype(np.integer)
+    x1 = x0 + 1
+    y1 = y0 + 1
+    z1 = z0 + 1
+
+    # put all samples outside datacube to 0
+    inds_out_of_range = (
+        (x0 < 0)
+        | (x1 < 0)
+        | (y0 < 0)
+        | (y1 < 0)
+        | (z0 < 0)
+        | (z1 < 0)
+        | (x0 >= n0)
+        | (x1 >= n0)
+        | (y0 >= n1)
+        | (y1 >= n1)
+        | (z0 >= n2)
+        | (z1 >= n2)
+    )
+
+    x0[inds_out_of_range] = 0
+    y0[inds_out_of_range] = 0
+    z0[inds_out_of_range] = 0
+    x1[inds_out_of_range] = 0
+ y1[inds_out_of_range] = 0 + z1[inds_out_of_range] = 0 + + x = x_indices - x0 + y = y_indices - y0 + z = z_indices - z0 + output = ( + input_array[x0, y0, z0] * (1 - x) * (1 - y) * (1 - z) + + input_array[x1, y0, z0] * x * (1 - y) * (1 - z) + + input_array[x0, y1, z0] * (1 - x) * y * (1 - z) + + input_array[x0, y0, z1] * (1 - x) * (1 - y) * z + + input_array[x1, y0, z1] * x * (1 - y) * z + + input_array[x0, y1, z1] * (1 - x) * y * z + + input_array[x1, y1, z0] * x * y * (1 - z) + + input_array[x1, y1, z1] * x * y * z + ) + + output[inds_out_of_range] = 0 + return output + + +def rand_float(low, high): + """ + Generate random floating point number between two limits + + Args: + low: low limit + high: high limit + + Returns: + single random floating point number + """ + return (high - low) * np.random.random_sample() + low + + +def rand_int(low, high): + """ + Generate random integer between two limits + + Args: + low: low limit + high: high limit + + Returns: + random integer between two limits + """ + return np.random.randint(low, high) + + +def rand_bool(): + """ + Generate random boolean. + + Returns: + Random boolean + """ + return bool(np.random.randint(0, 2)) + + +""" +TODO: the following is not needed and should be added as tests later. + +# Test the batch-functions +if __name__ == "__main__": + from data import read_segy, read_labels, get_slice + import tb_logger + import numpy as np + import os + + data, data_info = read_segy(os.path.join("F3", "data.segy")) + + train_coordinates = {"1": np.expand_dims(np.array([50, 50, 50]), 1)} + + logger = tb_logger.TBLogger("log", "batch test") + + [batch, labels] = get_random_batch(data, train_coordinates, 65, 32) + logger.log_images("normal", batch) + + [batch, labels] = get_random_batch( + data, train_coordinates, 65, 32, random_flip=True + ) + logger.log_images("flipping", batch) + + [batch, labels] = get_random_batch( + data, train_coordinates, 65, 32, random_stretch=0.50 + ) + logger.log_images("stretching", batch) + + [batch, labels] = get_random_batch( + data, train_coordinates, 65, 32, random_rot_xy=180 + ) + logger.log_images("rot", batch) + + [batch, labels] = get_random_batch( + data, train_coordinates, 65, 32, random_rot_z=15 + ) + logger.log_images("dip", batch) + + train_cls_imgs, train_coordinates = read_labels( + os.path.join("F3", "train"), data_info + ) + [batch, labels] = get_random_batch(data, train_coordinates, 65, 32) + logger.log_images("salt", batch[:16, :, :, :, :]) + logger.log_images("not salt", batch[16:, :, :, :, :]) + + logger.log_images("data", data[:, :, 50]) +""" diff --git a/contrib/experiments/interpretation/voxel2pixel/data.py b/contrib/experiments/interpretation/voxel2pixel/data.py new file mode 100644 index 00000000..bdcad76a --- /dev/null +++ b/contrib/experiments/interpretation/voxel2pixel/data.py @@ -0,0 +1,326 @@ +# Copyright (c) Microsoft. All rights reserved. +# Licensed under the MIT license. 
+
+# code modified from https://github.com/waldeland/CNN-for-ASI
+
+from __future__ import print_function
+from os.path import isfile, join
+
+import segyio
+from os import listdir
+import numpy as np
+import scipy.misc
+
+
+def read_segy(filename):
+    """
+    Read in a SEGY-format file given a filename
+
+    Args:
+        filename: input filename
+
+    Returns:
+        numpy data array and its info as a dictionary (tuple)
+
+    """
+    print("Loading data cube from", filename, "with:")
+
+    # Read full data cube
+    data = segyio.tools.cube(filename)
+
+    # Put temporal axis first
+    data = np.moveaxis(data, -1, 0)
+
+    # Make data cube fast to access
+    data = np.ascontiguousarray(data, "float32")
+
+    # Read meta data
+    segyfile = segyio.open(filename, "r")
+    print("  Crosslines: ", segyfile.xlines[0], ":", segyfile.xlines[-1])
+    print("  Inlines: ", segyfile.ilines[0], ":", segyfile.ilines[-1])
+    print("  Timeslices: ", "1", ":", data.shape[0])
+
+    # Make dict with cube-info
+    # TODO: read this from segy
+    # Read dt and other params needed to create a new SEGY file later
+    data_info = {
+        "crossline_start": segyfile.xlines[0],
+        "inline_start": segyfile.ilines[0],
+        "timeslice_start": 1,
+        "shape": data.shape,
+    }
+
+    return data, data_info
+
+
+def write_segy(out_filename, in_filename, out_cube):
+    """
+    Writes out_cube to a SEGY file (out_filename) with the same header/size as in_filename
+
+    Args:
+        out_filename: output SEGY filename
+        in_filename: input SEGY file used as a template for the output headers and size
+        out_cube: data cube (or list of cubes, in which case the last one is written) to write out
+
+    Returns:
+
+    """
+    # Select last channel
+    if type(out_cube) is list:
+        out_cube = out_cube[-1]
+
+    print("Writing interpretation to " + out_filename)
+    # Copy segy file
+    from shutil import copyfile
+
+    copyfile(in_filename, out_filename)
+
+    # Moving temporal axis back again
+    out_cube = np.moveaxis(out_cube, 0, -1)
+
+    # Open out-file
+    with segyio.open(out_filename, "r+") as src:
+        iline_start = src.ilines[0]
+        dtype = src.iline[iline_start].dtype
+        # loop through inlines and insert output
+        for i in src.ilines:
+            iline = out_cube[i - iline_start, :, :]
+            src.iline[i] = np.ascontiguousarray(iline.astype(dtype))
+
+    # TODO: rewrite this whole function
+    # Moving temporal axis first again - just in case the user wants to keep working on it
+    out_cube = np.moveaxis(out_cube, -1, 0)
+
+    print("Writing interpretation - Finished")
+    return
+
+
+# Alternative writings for slice-type
+inline_alias = ["inline", "in-line", "iline", "y"]
+crossline_alias = ["crossline", "cross-line", "xline", "x"]
+timeslice_alias = ["timeslice", "time-slice", "t", "z", "depthslice", "depth"]
+
+
+def read_labels(fname, data_info):
+    """
+    Read labels from an image.
+
+    Args:
+        fname: filename of labelling mask (image)
+        data_info: dictionary describing the data
+
+    Returns:
+        list of labels and list of coordinates
+    """
+
+    label_imgs = []
+    label_coordinates = {}
+
+    # Parse the slice type and number from the file name
+
+    tmp = fname.split("/")[-1].split("_")
+    slice_type = tmp[0].lower()
+    tmp = tmp[1].split(".")
+    slice_no = int(tmp[0])
+
+    if slice_type not in inline_alias + crossline_alias + timeslice_alias:
+        print(
+            "File:", fname, "could not be loaded.", "Unknown slice type",
+        )
+        return None
+
+    if slice_type in inline_alias:
+        slice_type = "inline"
+    if slice_type in crossline_alias:
+        slice_type = "crossline"
+    if slice_type in timeslice_alias:
+        slice_type = "timeslice"
+
+    # Read file
+    print("Loading labels for", slice_type, slice_no, "with")
+    img = scipy.misc.imread(fname)
+    img = interpolate_to_fit_data(img, slice_type, slice_no, data_info)
+    label_img = parse_labels_in_image(img)
+
+    # Get coordinates for slice
+    coords = get_coordinates_for_slice(slice_type, slice_no, data_info)
+
+    # Loop through labels in label_img and append to label_coordinates
+    for cls in np.unique(label_img):
+        if cls > -1:
+            if str(cls) not in label_coordinates.keys():
+                label_coordinates[str(cls)] = np.array(np.zeros([3, 0]))
+            inds_with_cls = label_img == cls
+            cords_with_cls = coords[:, inds_with_cls.ravel()]
+            label_coordinates[str(cls)] = np.concatenate((label_coordinates[str(cls)], cords_with_cls), 1)
+            print(
+                " ", str(np.sum(inds_with_cls)), "labels for class", str(cls),
+            )
+        if len(np.unique(label_img)) == 1:
+            print(" ", 0, "labels", str(cls))
+
+    # Add label_img to output
+    label_imgs.append([label_img, slice_type, slice_no])
+
+    return label_imgs, label_coordinates
+
+
+# Add colors to this table to make it possible to have more classes
+class_color_coding = [
+    [0, 0, 255],  # blue
+    [0, 255, 0],  # green
+    [0, 255, 255],  # cyan
+    [255, 0, 0],  # red
+    [255, 0, 255],  # magenta
+    [255, 255, 0],  # yellow
+]
+
+
+def parse_labels_in_image(img):
+    """
+    Convert an RGB image to a class-label image.
+
+    Args:
+        img: 3-channel image array
+
+    Returns:
+        array of per-pixel class labels (-1 = no class)
+    """
+    label_img = np.int16(img[:, :, 0]) * 0 - 1  # -1 = no class
+
+    # decompose color channels (alpha is handled below)
+    r = img[:, :, 0]
+    g = img[:, :, 1]
+    b = img[:, :, 2]
+
+    # Alpha channel: scale the color channels by opacity so that fully
+    # transparent pixels do not match any class color
+    if img.shape[2] == 4:
+        a = img[:, :, 3] / 255
+        r = r * a
+        g = g * a
+        b = b * a
+
+    tolerance = 1
+    # Go through classes and find pixels with this class
+    cls = 0
+    for color in class_color_coding:
+        # Find pixels with these labels
+        inds = (
+            (np.abs(r - color[0]) < tolerance) & (np.abs(g - color[1]) < tolerance) & (np.abs(b - color[2]) < tolerance)
+        )
+        label_img[inds] = cls
+        cls += 1
+
+    return label_img
+
+
+def interpolate_to_fit_data(img, slice_type, slice_no, data_info):
+    """
+    Resize the label image, if needed, so that it fits the data slice
+
+    Args:
+        img: image array
+        slice_type: inline, crossline or timeslice slice type
+        slice_no: slice number
+        data_info: data info dictionary extracted from the SEGY file
+
+    Returns:
+        resized image array
+
+    """
+
+    # Get wanted output size
+    if slice_type == "inline":
+        n0 = data_info["shape"][0]
+        n1 = data_info["shape"][2]
+    elif slice_type == "crossline":
+        n0 = data_info["shape"][0]
+        n1 = data_info["shape"][1]
+    elif slice_type == "timeslice":
+        n0 = data_info["shape"][1]
+        n1 = data_info["shape"][2]
+    return scipy.misc.imresize(img, (n0, n1), interp="nearest")
+
+
+def get_coordinates_for_slice(slice_type, slice_no, data_info):
+    """
+    Get coordinates for a slice in the full cube
+
+    Args:
+        slice_type: type of slice, e.g. inline, crossline, etc
+        slice_no: slice number
+        data_info: data dictionary array
+
+    Returns:
+        index coordinates of the voxel
+
+    """
+    ds = data_info["shape"]
+
+    # Coordinates for cube
+    x0, x1, x2 = np.meshgrid(
+        np.linspace(0, ds[0] - 1, ds[0]),
+        np.linspace(0, ds[1] - 1, ds[1]),
+        np.linspace(0, ds[2] - 1, ds[2]),
+        indexing="ij",
+    )
+    if slice_type == "inline":
+        start = data_info["inline_start"]
+        slice_no = slice_no - start
+
+        x0 = x0[:, slice_no, :]
+        x1 = x1[:, slice_no, :]
+        x2 = x2[:, slice_no, :]
+    elif slice_type == "crossline":
+        start = data_info["crossline_start"]
+        slice_no = slice_no - start
+        x0 = x0[:, :, slice_no]
+        x1 = x1[:, :, slice_no]
+        x2 = x2[:, :, slice_no]
+
+    elif slice_type == "timeslice":
+        start = data_info["timeslice_start"]
+        slice_no = slice_no - start
+        x0 = x0[slice_no, :, :]
+        x1 = x1[slice_no, :, :]
+        x2 = x2[slice_no, :, :]
+
+    # Collect indexes
+    x0 = np.expand_dims(x0.ravel(), 0)
+    x1 = np.expand_dims(x1.ravel(), 0)
+    x2 = np.expand_dims(x2.ravel(), 0)
+    coords = np.concatenate((x0, x1, x2), axis=0)
+
+    return coords
+
+
+def get_slice(data, data_info, slice_type, slice_no, window=0):
+    """
+    Return a data slice
+
+    Args:
+        data: input 3D voxel numpy array
+        data_info: data info dictionary
+        slice_type: type of slice, like inline, crossline, etc
+        slice_no: slice number
+        window: window size around center pixel
+
+    Returns:
+        2D slice of the voxel as a numpy array
+
+    """
+
+    # slice along the axis that matches the slice type (temporal axis is first),
+    # consistent with the axis mapping in get_coordinates_for_slice above
+    if slice_type == "inline":
+        slice_no = slice_no - data_info["inline_start"]
+        slice_data = data[:, slice_no - window : slice_no + window + 1, :]
+
+    elif slice_type == "crossline":
+        slice_no = slice_no - data_info["crossline_start"]
+        slice_data = data[:, :, slice_no - window : slice_no + window + 1]
+
+    elif slice_type == "timeslice":
+        slice_no = slice_no - data_info["timeslice_start"]
+        slice_data = data[slice_no - window : slice_no + window + 1, :, :]
+
+    return np.squeeze(slice_data)
diff --git a/contrib/experiments/interpretation/voxel2pixel/tb_logger.py b/contrib/experiments/interpretation/voxel2pixel/tb_logger.py
new file mode 100644
index 00000000..c6a894dc
--- /dev/null
+++ b/contrib/experiments/interpretation/voxel2pixel/tb_logger.py
@@ -0,0 +1,181 @@
+# Copyright (c) Microsoft. All rights reserved.
+# Licensed under the MIT license.
+
+# code modified from https://github.com/waldeland/CNN-for-ASI
+
+from __future__ import print_function
+from os.path import join
+
+# tensorflow is optional here - if it cannot be imported, tensorboard logging is unavailable
+try:
+    import tensorflow as tf
+except ImportError:
+    print("Tensorflow could not be imported, therefore tensorboard cannot be used.")
+
+from io import BytesIO
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+import datetime
+
+# TODO: it looks like the majority of the methods of this class are static and as such they should be in utils
+class TBLogger(object):
+    """
+    TensorBoard logger class
+    """
+
+    def __init__(self, log_dir, folder_name=""):
+
+        self.log_dir = join(log_dir, folder_name + " " + datetime.datetime.now().strftime("%I%M%p, %B %d, %Y"),)
+        self.log_dir = self.log_dir.replace("//", "/")
+        self.writer = tf.summary.FileWriter(self.log_dir)
+
+    def log_scalar(self, tag, value, step=0):
+        """
+        Add a scalar summary
+
+        Args:
+            tag: tag
+            value: simple_value
+            step: step
+
+        """
+        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
+        self.writer.add_summary(summary, step)
+
+    # TODO: this should probably be a static method - take care of this when re-writing the whole thing
+    def make_list_of_2d_array(self, im):
+        """
+        Convert an image array of 2, 3 or 4 dimensions into a list of 2D arrays
+
+        Args:
+            im: image
+
+        Returns:
+            list of 2D images
+
+        """
+        if isinstance(im, list):
+            return im
+        ims = []
+        if len(im.shape) == 2:
+            ims.append(im)
+        elif len(im.shape) == 3:
+            for i in range(im.shape[0]):
+                ims.append(np.squeeze(im[i, :, :]))
+
+        elif len(im.shape) == 4:
+            for i in range(im.shape[0]):
+                ims.append(np.squeeze(im[i, 0, :, :]))
+        return ims
+
+    def log_images(self, tag, images, step=0, dim=2, max_imgs=50, cm="jet"):
+        """
+        Log images to TensorBoard
+
+        Args:
+            tag: image tag
+            images: list of images
+            step: training step
+            dim: image dimensionality (3 for voxel)
+            max_imgs: max number of images
+            cm: colormap
+
+        """
+
+        # Make sure images are in numpy format in case the input is a Torch-variable
+        images = self.convert_to_numpy(images)
+
+        # lists are already collections of 2D slices, so only arrays are inspected here
+        if not isinstance(images, list) and len(images.shape) > 2:
+            dim = 3
+
+        # Make list of images
+        if dim == 2:
+            images = self.make_list_of_2d_array(images)
+
+        # If 3D we make one list for each slice-type
+        if dim == 3:
+            new_images_ts, new_images_il, new_images_cl = self.get_slices_from_3d(images)
+            self.log_images(tag + "_timeslice", new_images_ts, step, 2, max_imgs)
+            self.log_images(tag + "_inline", new_images_il, step, 2, max_imgs)
+            self.log_images(tag + "_crossline", new_images_cl, step, 2, max_imgs)
+            return
+
+        im_summaries = []
+
+        for nr, img in enumerate(images):
+
+            # Grayscale
+            if cm == "gray" or cm == "grey":
+                img = img.astype("float")
+                img = np.repeat(np.expand_dims(img, 2), 3, 2)
+                img -= img.min()
+                img /= img.max()
+                img *= 255
+                img = img.astype("uint8")
+
+            # Write the image to a string
+            s = BytesIO()
+            plt.imsave(s, img, format="png")
+
+            # Create an Image object
+            img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(), height=img.shape[0], width=img.shape[1],)
+            # Create a Summary value
+            im_summaries.append(tf.Summary.Value(tag="%s/%d" % (tag, nr), image=img_sum))
+
+            # if nr == max_imgs-1:
+            #     break
+
+        # Create and write Summary
+        summary = tf.Summary(value=im_summaries)
+        self.writer.add_summary(summary, step)
+
+    # TODO: probably another static method
+    def get_slices_from_3d(self, img):
+        
""" + Cuts out middle slices from image + + Args: + img: image array + + """ + + new_images_ts = [] + new_images_il = [] + new_images_cl = [] + + if len(img.shape) == 3: + new_images_ts.append(np.squeeze(img[img.shape[0] / 2, :, :])) + new_images_il.append(np.squeeze(img[:, img.shape[1] / 2, :])) + new_images_cl.append(np.squeeze(img[:, :, img.shape[2] / 2])) + + elif len(img.shape) == 4: + for i in range(img.shape[0]): + new_images_ts.append(np.squeeze(img[i, img.shape[1] / 2, :, :])) + new_images_il.append(np.squeeze(img[i, :, img.shape[2] / 2, :])) + new_images_cl.append(np.squeeze(img[i, :, :, img.shape[3] / 2])) + + elif len(img.shape) == 5: + for i in range(img.shape[0]): + new_images_ts.append(np.squeeze(img[i, 0, img.shape[2] / 2, :, :])) + new_images_il.append(np.squeeze(img[i, 0, :, img.shape[3] / 2, :])) + new_images_cl.append(np.squeeze(img[i, 0, :, :, img.shape[4] / 2])) + + return new_images_ts, new_images_il, new_images_cl + + # TODO: another static method most likely + def convert_to_numpy(self, im): + """ + Convert torch to numpy + + Args: + im: image array + + """ + + if type(im) == torch.autograd.Variable: + # Put on CPU + im = im.cpu() + # Get np-data + im = im.data.numpy() + return im diff --git a/contrib/experiments/interpretation/voxel2pixel/test_parallel.py b/contrib/experiments/interpretation/voxel2pixel/test_parallel.py new file mode 100644 index 00000000..4e095afc --- /dev/null +++ b/contrib/experiments/interpretation/voxel2pixel/test_parallel.py @@ -0,0 +1,426 @@ +# Copyright (c) Microsoft. All rights reserved. +# Licensed under the MIT license. + +# code modified from https://github.com/waldeland/CNN-for-ASI +from __future__ import print_function + +import os + +# set default number of GPUs which are discoverable +N_GPU = 4 +DEVICE_IDS = list(range(N_GPU)) +os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(x) for x in DEVICE_IDS]) + +# static parameters +RESOLUTION = 1 +# these match how the model is trained +N_CLASSES = 2 +IM_SIZE = 65 + +import random +import argparse +import json + +import torch +import torch.nn as nn +import torch.backends.cudnn as cudnn +from torch.utils.data import Dataset, DataLoader +import torch.distributed as dist + +if torch.cuda.is_available(): + device_str = os.environ["CUDA_VISIBLE_DEVICES"] + device = torch.device("cuda:" + device_str) +else: + raise Exception("No GPU detected for parallel scoring!") + +# ability to perform multiprocessing +import multiprocessing + +from os.path import join +from data import read_segy, get_slice +from texture_net import TextureNet +import itertools +import numpy as np +import tb_logger +from data import write_segy + +# graphical progress bar +from tqdm import tqdm + + +class ModelWrapper(nn.Module): + """ + Wrap TextureNet for (Distributed)DataParallel to invoke classify method + """ + + def __init__(self, texture_model): + super(ModelWrapper, self).__init__() + self.texture_model = texture_model + + def forward(self, input_net): + return self.texture_model.classify(input_net) + + +class MyDataset(Dataset): + def __init__(self, data, window, coord_list): + + # main array + self.data = data + self.coord_list = coord_list + self.window = window + self.len = len(coord_list) + + def __getitem__(self, index): + + # TODO: can we specify a pixel mathematically by index? 
+
+        pixel = self.coord_list[index]
+        x, y, z = pixel
+        # TODO: current bottleneck - can we slice out voxels any faster
+        small_cube = self.data[
+            x - self.window : x + self.window + 1,
+            y - self.window : y + self.window + 1,
+            z - self.window : z + self.window + 1,
+        ]
+
+        return small_cube[np.newaxis, :, :, :], pixel
+
+    def __len__(self):
+        return self.len
+
+
+def main_worker(gpu, ngpus_per_node, args):
+    """
+    Main worker function. Given the gpu parameter and how many GPUs there are per node,
+    it can figure out its rank.
+
+    :param gpu: rank of the process if gpu >= ngpus_per_node, otherwise just the GPU ID which the worker will run on.
+    :param ngpus_per_node: total number of GPUs available on this node.
+    :param args: various arguments for the code in the worker.
+    :return: nothing
+    """
+
+    print("I got GPU", gpu)
+
+    args.rank = gpu
+
+    # loop around in round-robin fashion if we want to run multiple processes per GPU
+    args.gpu = gpu % ngpus_per_node
+
+    # initialize the distributed process and join the group
+    print(
+        "setting rank", args.rank, "world size", args.world_size, args.dist_backend, args.dist_url,
+    )
+    dist.init_process_group(
+        backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank,
+    )
+
+    # set default GPU device for this worker
+    torch.cuda.set_device(args.gpu)
+    # set up device for the rest of the code
+    local_device = torch.device("cuda:" + str(args.gpu))
+
+    # Load trained model (run train.py to create the trained model first)
+    network = TextureNet(n_classes=N_CLASSES)
+    model_state_dict = torch.load(join(args.data, "saved_model.pt"), map_location=local_device)
+    network.load_state_dict(model_state_dict)
+    network.eval()
+    network.cuda(args.gpu)
+
+    # set the scoring wrapper also to eval mode
+    model = ModelWrapper(network)
+    model.eval()
+    model.cuda(args.gpu)
+
+    # When using a single GPU per process and per
+    # DistributedDataParallel, we need to divide the batch size
+    # ourselves based on the total number of GPUs we have.
+    # Min batch size is 1
+    args.batch_size = max(int(args.batch_size / ngpus_per_node), 1)
+    # obsolete: number of data loading workers - this is only used when reading from disk, which we're not
+    # args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
+
+    # wrap the model for distributed use - for scoring this is not needed
+    # model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
+
+    # set to benchmark mode because we're running the same workload multiple times
+    cudnn.benchmark = True
+
+    # Read 3D cube
+    # NOTE: we cannot pass this data manually as serialization of data into each python process is costly,
+    # so each worker has to load the data on its own.
+    data, data_info = read_segy(join(args.data, "data.segy"))
+
+    # Get half window size
+    window = IM_SIZE // 2
+
+    # reduce data size for debugging
+    if args.debug:
+        data = data[0 : 3 * window]
+
+    # generate full list of coordinates
+    # memory footprint of this isn't large yet, so no need to wrap it in a generator
+    nx, ny, nz = data.shape
+    x_list = range(window, nx - window)
+    y_list = range(window, ny - window)
+    z_list = range(window, nz - window)
+
+    print("-- generating coord list --")
+    # TODO: is there any way to use a generator with pyTorch data loader?
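+    # (Hedged note: torch.utils.data.IterableDataset, available in PyTorch >= 1.2,
+    # supports generator-style datasets and could avoid materializing this list.)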
+ coord_list = list(itertools.product(x_list, y_list, z_list)) + + # we need to map the data manually to each rank - DistributedDataParallel doesn't do this at score time + print("take a subset of coord_list by chunk") + coord_list = list(np.array_split(np.array(coord_list), args.world_size)[args.rank]) + coord_list = [tuple(x) for x in coord_list] + + # we only score first batch in debug mode + if args.debug: + coord_list = coord_list[0 : args.batch_size] + + # prepare the data + print("setup dataset") + # TODO: RuntimeError: cannot pin 'torch.cuda.FloatTensor' only dense CPU tensors can be pinned + data_torch = torch.cuda.FloatTensor(data).cuda(args.gpu, non_blocking=True) + dataset = MyDataset(data_torch, window, coord_list) + + # not sampling like in training + # datasampler = DistributedSampler(dataset) + # just set some default epoch + # datasampler.set_epoch(1) + + # we use 0 workers because we're reading from memory + print("setting up loader") + my_loader = DataLoader( + dataset=dataset, + batch_size=args.batch_size, + shuffle=False, + num_workers=0, + pin_memory=False, + sampler=None + # sampler=datasampler + ) + + print("running loop") + + pixels_x = [] + pixels_y = [] + pixels_z = [] + predictions = [] + + # Loop through center pixels in output cube + with torch.no_grad(): + print("no grad") + for (chunk, pixel) in tqdm(my_loader): + data_input = chunk.cuda(args.gpu, non_blocking=True) + output = model(data_input) + # save and deal with it later on CPU + # we want to make sure order is preserved + pixels_x += pixel[0].tolist() + pixels_y += pixel[1].tolist() + pixels_z += pixel[2].tolist() + predictions += output.tolist() + # just score a single batch in debug mode + if args.debug: + break + + # TODO: legacy Queue Manager code from multiprocessing which we left here for illustration purposes + # result_queue.append([deepcopy(coord_list), deepcopy(predictions)]) + # result_queue.append([coord_list, predictions]) + # transform pixels into x, y, z list format + with open("results_{}.json".format(args.rank), "w") as f: + json.dump( + { + "pixels_x": pixels_x, + "pixels_y": pixels_y, + "pixels_z": pixels_z, + "preds": [int(x[0][0][0][0]) for x in predictions], + }, + f, + ) + + # TODO: we cannot use pickle to dump from multiprocess - processes lock up + # with open("result_predictions_{}.pkl".format(args.rank), "wb") as f: + # print ("dumping predictions pickle file") + # pickle.dump(predictions, f) + + +parser = argparse.ArgumentParser(description="Seismic Distributed Scoring") +parser.add_argument("-d", "--data", default="/home/maxkaz/data/dutchf3", type=str, help="default dataset folder name") +parser.add_argument( + "-s", + "--slice", + default="inline", + type=str, + choices=["inline", "crossline", "timeslice", "full"], + help="slice type which we want to score on", +) +parser.add_argument( + "-n", "--slice-num", default=339, type=int, help="slice number which we want to score", +) +parser.add_argument( + "-b", "--batch-size", default=2 ** 11, type=int, help="batch size which we use for scoring", +) +parser.add_argument( + "-p", "--n-proc-per-gpu", default=1, type=int, help="number of multiple processes to run per each GPU", +) +parser.add_argument( + "--dist-url", default="tcp://127.0.0.1:12345", type=str, help="url used to set up distributed training", +) +parser.add_argument("--dist-backend", default="nccl", type=str, help="distributed backend") +parser.add_argument("--seed", default=0, type=int, help="default random number seed") +parser.add_argument( + "--debug", 
action="store_true", help="debug flag - if on we will only process one batch", +) + + +def main(): + + # use distributed scoring+ + if RESOLUTION != 1: + raise Exception("Currently we only support pixel-level scoring") + + args = parser.parse_args() + + args.gpu = None + args.rank = 0 + + # world size is the total number of processes we want to run across all nodes and GPUs + args.world_size = N_GPU * args.n_proc_per_gpu + + if args.debug: + args.batch_size = 4 + + # fix away any kind of randomness - although for scoring it should not matter + random.seed(args.seed) + torch.manual_seed(args.seed) + cudnn.deterministic = True + + print("RESOLUTION {}".format(RESOLUTION)) + + ########################################################################## + print("-- scoring on GPU --") + + ngpus_per_node = torch.cuda.device_count() + print("nGPUs per node", ngpus_per_node) + + """ + First, read this: https://thelaziestprogrammer.com/python/a-multiprocessing-pool-pickle + + OK, so there are a few ways in which we can spawn a running process with pyTorch: + 1) Default mp.spawn should work just fine but won't let us access internals + 2) So we copied out the code from mp.spawn below to control how processes get created + 3) One could spawn their own processes but that would not be thread-safe with CUDA, line + "mp = multiprocessing.get_context('spawn')" guarantees we use the proper pyTorch context + + Input data serialization is too costly, in general so is output data serialization as noted here: + https://docs.python.org/3/library/multiprocessing.html + + Feeding data into each process is too costly, so each process loads its own data. + + For deserialization we could try and fail using: + 1) Multiprocessing queue manager + manager = Manager() + return_dict = manager.dict() + OR + result_queue = multiprocessing.Queue() + CALLING + with Manager() as manager: + results_list = manager.list() + mp.spawn(main_worker, nprocs=args.world_size, args=(ngpus_per_node, results_list/dict/queue, args)) + results = deepcopy(results_list) + 2) pickling results to disc. + + Turns out that for the reasons mentioned in the first article both approaches are too costly. 
+ + The only reasonable way to deserialize data from a Python process is to write it to text, in which case + writing to JSON is a saner approach: https://www.datacamp.com/community/tutorials/pickle-python-tutorial + """ + + # invoke processes manually suppressing error queue + mp = multiprocessing.get_context("spawn") + # error_queues = [] + processes = [] + for i in range(args.world_size): + # error_queue = mp.SimpleQueue() + process = mp.Process(target=main_worker, args=(i, ngpus_per_node, args), daemon=False) + process.start() + # error_queues.append(error_queue) + processes.append(process) + + # block on wait + for process in processes: + process.join() + + print("-- aggregating results --") + + # Read 3D cube + data, data_info = read_segy(join(args.data, "data.segy")) + + # Log to tensorboard - input slice + logger = tb_logger.TBLogger("log", "Test") + logger.log_images( + args.slice + "_" + str(args.slice_num), get_slice(data, data_info, args.slice, args.slice_num), cm="gray", + ) + + x_coords = [] + y_coords = [] + z_coords = [] + predictions = [] + for i in range(args.world_size): + with open("results_{}.json".format(i), "r") as f: + results_dict = json.load(f) + + x_coords += results_dict["pixels_x"] + y_coords += results_dict["pixels_y"] + z_coords += results_dict["pixels_z"] + predictions += results_dict["preds"] + + """ + So because of Python's GIL having multiple workers write to the same array is not efficient - basically + the only way we can have shared memory is with threading but thanks to GIL only one thread can execute at a time, + so we end up with the overhead of managing multiple threads when writes happen sequentially. + + A much faster alternative is to just invoke underlying compiled code (C) through the use of array indexing. + + So basically instead of the following: + + NUM_CORES = multiprocessing.cpu_count() + print("Post-processing will run on {} CPU cores on your machine.".format(NUM_CORES)) + + def worker(classified_cube, coord): + x, y, z = coord + ind = new_coord_list.index(coord) + # print (coord, ind) + pred_class = predictions[ind] + classified_cube[x, y, z] = pred_class + + # launch workers in parallel with memory sharing ("threading" backend) + _ = Parallel(n_jobs=4*NUM_CORES, backend="threading")( + delayed(worker)(classified_cube, coord) for coord in tqdm(pixels) + ) + + We do this: + """ + + # placeholder for results + classified_cube = np.zeros(data.shape) + # store final results + classified_cube[x_coords, y_coords, z_coords] = predictions + + print("-- writing segy --") + in_file = join(args.data, "data.segy".format(RESOLUTION)) + out_file = join(args.data, "salt_{}.segy".format(RESOLUTION)) + write_segy(out_file, in_file, classified_cube) + + print("-- logging prediction --") + # log prediction to tensorboard + logger = tb_logger.TBLogger("log", "Test_scored") + logger.log_images( + args.slice + "_" + str(args.slice_num), + get_slice(classified_cube, data_info, args.slice, args.slice_num), + cm="binary", + ) + + +if __name__ == "__main__": + main() diff --git a/contrib/experiments/interpretation/voxel2pixel/texture_net.py b/contrib/experiments/interpretation/voxel2pixel/texture_net.py new file mode 100644 index 00000000..f19fda96 --- /dev/null +++ b/contrib/experiments/interpretation/voxel2pixel/texture_net.py @@ -0,0 +1,157 @@ +# Copyright (c) Microsoft. All rights reserved. +# Licensed under the MIT license. 
+
+# code modified from https://github.com/waldeland/CNN-for-ASI
+
+import torch
+from torch import nn
+
+from utils import gpu_no_of_var
+
+
+class TextureNet(nn.Module):
+    def __init__(self, n_classes=2, n_filters=50):
+        super(TextureNet, self).__init__()
+
+        # Network definition
+        # Conv3d parameters: in_channels, out_channels, filter_size, stride (downsampling factor)
+        self.net = nn.Sequential(
+            nn.Conv3d(1, n_filters, 5, 4, padding=2),
+            nn.BatchNorm3d(n_filters),
+            # nn.Dropout3d()  # Dropout can be added like this ...
+            nn.ReLU(),
+            nn.Conv3d(n_filters, n_filters, 3, 2, padding=1, bias=False),
+            nn.BatchNorm3d(n_filters),
+            nn.ReLU(),
+            nn.Conv3d(n_filters, n_filters, 3, 2, padding=1, bias=False),
+            nn.BatchNorm3d(n_filters),
+            nn.ReLU(),
+            nn.Conv3d(n_filters, n_filters, 3, 2, padding=1, bias=False),
+            nn.BatchNorm3d(n_filters),
+            nn.ReLU(),
+            nn.Conv3d(n_filters, n_filters, 3, 3, padding=1, bias=False),
+            nn.BatchNorm3d(n_filters),
+            nn.ReLU(),
+            nn.Conv3d(
+                n_filters, n_classes, 1, 1
+            ),  # This is the equivalent of a fully connected layer since input has width/height/depth = 1
+            nn.ReLU(),
+        )
+        # The filter weights are by default initialized at random
+
+    def forward(self, x):
+        """
+        Compute the network output
+
+        Args:
+            x: network input - torch tensor
+
+        Returns:
+            output from the neural network
+
+        """
+        return self.net(x)
+
+    def classify(self, x):
+        """
+        Classification wrapper
+
+        Args:
+            x: input tensor for classification
+
+        Returns:
+            classification result
+
+        """
+        x = self.net(x)
+        _, class_no = torch.max(x, 1, keepdim=True)
+        return class_no
+
+    # Functions to get output from intermediate feature layers
+    def f1(self, x):
+        """
+        Wrapper to obtain the output of hidden layer 1
+
+        Args:
+            x: input tensor for classification
+
+        Returns:
+            requested layer
+
+        """
+        return self.getFeatures(x, 0)
+
+    def f2(self, x):
+        """
+        Wrapper to obtain the output of hidden layer 2
+
+        Args:
+            x: input tensor for classification
+
+        Returns:
+            requested layer
+
+        """
+        return self.getFeatures(x, 1)
+
+    def f3(self, x):
+        """
+        Wrapper to obtain the output of hidden layer 3
+
+        Args:
+            x: input tensor for classification
+
+        Returns:
+            requested layer
+
+        """
+        return self.getFeatures(x, 2)
+
+    def f4(self, x):
+        """
+        Wrapper to obtain the output of hidden layer 4
+
+        Args:
+            x: input tensor for classification
+
+        Returns:
+            requested layer
+
+        """
+        return self.getFeatures(x, 3)
+
+    def f5(self, x):
+        """
+        Wrapper to obtain the output of hidden layer 5
+
+        Args:
+            x: input tensor for classification
+
+        Returns:
+            requested layer
+
+        """
+        return self.getFeatures(x, 4)
+
+    def getFeatures(self, x, layer_no):
+        """
+        Main method used by the f1-f5 wrappers: runs the network up to
+        (and including) the requested hidden layer
+
+        Args:
+            x: input tensor for classification
+            layer_no: number of the hidden layer we want to extract
+
+        Returns:
+            requested layer
+
+        """
+        layer_indexes = [0, 3, 6, 9, 12]
+
+        # Make new network that has the layers up to the requested output
+        tmp_net = nn.Sequential()
+        layers = list(self.net.children())[0 : layer_indexes[layer_no] + 1]
+        for i in range(len(layers)):
+            tmp_net.add_module(str(i), layers[i])
+        if type(gpu_no_of_var(self)) == int:
+            tmp_net.cuda(gpu_no_of_var(self))
+        return tmp_net(x)
diff --git a/contrib/experiments/interpretation/voxel2pixel/train.py b/contrib/experiments/interpretation/voxel2pixel/train.py
new file mode 100644
index 00000000..74f664ca
--- /dev/null
+++ b/contrib/experiments/interpretation/voxel2pixel/train.py
@@ -0,0 +1,136 @@
+# Copyright (c) Microsoft. All rights reserved.
+# Licensed under the MIT license. + +# code modified from https://github.com/waldeland/CNN-for-ASI + +from __future__ import print_function +from os.path import join +import torch +from torch import nn +from data import read_segy, read_labels, get_slice +from batch import get_random_batch +from torch.autograd import Variable +from texture_net import TextureNet +import tb_logger +import utils + +# Parameters +ROOT_PATH = "/home/maxkaz/data/dutchf3" +INPUT_VOXEL = "data.segy" +TRAIN_MASK = "inline_339.png" +VAL_MASK = "inline_405.png" +IM_SIZE = 65 +# If you have a GPU with little memory, try reducing this to 16 (may degrade results) +BATCH_SIZE = 32 +# Switch to toggle the use of GPU or not +USE_GPU = True +# Log progress on tensor board +LOG_TENSORBOARD = True + +# the rest of the code +if LOG_TENSORBOARD: + logger = tb_logger.TBLogger("log", "Train") + +# This is the network definition proposed in the paper +network = TextureNet(n_classes=2) + +# Loss function - Softmax function is included +cross_entropy = nn.CrossEntropyLoss() + +# Optimizer to control step size in gradient descent +optimizer = torch.optim.Adam(network.parameters()) + +# Transfer model to gpu +if USE_GPU and torch.cuda.is_available(): + network = network.cuda() + +# Load the data cube and labels +data, data_info = read_segy(join(ROOT_PATH, INPUT_VOXEL)) +train_class_imgs, train_coordinates = read_labels(join(ROOT_PATH, TRAIN_MASK), data_info) +val_class_imgs, _ = read_labels(join(ROOT_PATH, VAL_MASK), data_info) + +# Plot training/validation data with labels +if LOG_TENSORBOARD: + for class_img in train_class_imgs + val_class_imgs: + logger.log_images( + class_img[1] + "_" + str(class_img[2]), get_slice(data, data_info, class_img[1], class_img[2]), cm="gray", + ) + logger.log_images( + class_img[1] + "_" + str(class_img[2]) + "_true_class", class_img[0], + ) + +# Training loop +for i in range(5000): + + # Get random training batch with augmentation + # This is the bottle-neck for training and could be done more efficient on the GPU... + [batch, labels] = get_random_batch( + data, + train_coordinates, + IM_SIZE, + BATCH_SIZE, + random_flip=True, + random_stretch=0.2, + random_rot_xy=180, + random_rot_z=15, + ) + + # Format data to torch-variable + batch = Variable(torch.Tensor(batch).float()) + labels = Variable(torch.Tensor(labels).long()) + + # Transfer data to gpu + if USE_GPU and torch.cuda.is_available(): + batch = batch.cuda() + labels = labels.cuda() + + # Set network to training phase + network.train() + + # Run the samples through the network + output = network(batch) + + # Compute loss + loss = cross_entropy(torch.squeeze(output), labels) + + # Do back-propagation to get gradients of weights w.r.t. 
loss
+    loss.backward()
+
+    # Ask the optimizer to adjust the parameters in the direction of lower loss
+    optimizer.step()
+
+    # Every 10th iteration - print training loss
+    if i % 10 == 0:
+        network.eval()
+
+        # Log training loss/acc
+        print("Iteration:", i, "Training loss:", utils.var_to_np(loss))
+        if LOG_TENSORBOARD:
+            logger.log_scalar("training_loss", utils.var_to_np(loss), i)
+        for k, v in utils.compute_accuracy(torch.argmax(output, 1), labels).items():
+            if LOG_TENSORBOARD:
+                logger.log_scalar("training_" + k, v, i)
+            print(" -", k, v, "%")
+
+    # every 100th iteration
+    if i % 100 == 0 and LOG_TENSORBOARD:
+        network.eval()
+
+        # Output predicted train/validation class/probability images
+        for class_img in train_class_imgs + val_class_imgs:
+
+            slice = class_img[1]
+            slice_no = class_img[2]
+
+            class_img = utils.interpret(
+                network.classify, data, data_info, slice, slice_no, IM_SIZE, 16, return_full_size=True, use_gpu=USE_GPU,
+            )
+            logger.log_images(slice + "_" + str(slice_no) + "_pred_class", class_img[0], step=i)
+
+            class_img = utils.interpret(
+                network, data, data_info, slice, slice_no, IM_SIZE, 16, return_full_size=True, use_gpu=USE_GPU,
+            )
+            logger.log_images(slice + "_" + str(slice_no) + "_pred_prob", class_img[0], i)
+
+    # Store trained network
+    torch.save(network.state_dict(), join(ROOT_PATH, "saved_model.pt"))
diff --git a/contrib/experiments/interpretation/voxel2pixel/utils.py b/contrib/experiments/interpretation/voxel2pixel/utils.py
new file mode 100644
index 00000000..31db6b55
--- /dev/null
+++ b/contrib/experiments/interpretation/voxel2pixel/utils.py
@@ -0,0 +1,337 @@
+# Copyright (c) Microsoft. All rights reserved.
+# Licensed under the MIT license.
+
+# code modified from https://github.com/waldeland/CNN-for-ASI
+
+from __future__ import print_function
+
+import torch
+import numpy as np
+from torch.autograd import Variable
+from scipy.interpolate import interpn
+import sys
+import time
+
+# global parameters
+ST = 0
+LAST_UPDATE = 0
+
+
+def interpret(
+    network, data, data_info, slice, slice_no, im_size, subsampl, return_full_size=True, use_gpu=True,
+):
+    """
+    Down-samples a slice from the classified image and upsamples it to full resolution if needed. Basically,
+    given a full 3D-classified voxel at a particular resolution (say we classify every n-th pixel, as given by the
+    subsampl variable below) we take a particular slice from the voxel and optionally blow it up to full resolution
+    as if we had classified every single pixel.
+
+    Args:
+        network: pytorch model definition
+        data: input voxel
+        data_info: input voxel information
+        slice: slice type which we want to interpret
+        slice_no: slice number
+        im_size: size of the voxel
+        subsampl: the resolution at which we want to subsample, e.g. we move across every subsampl-th pixel
+        return_full_size: boolean flag, enable if you want to return full size without downsampling
+        use_gpu: boolean flag to use the GPU
+
+    Returns:
+        upsampled slice
+
+    """
+
+    # Wrap np.linspace in compact function call
+    ls = lambda N: np.linspace(0, N - 1, N, dtype="int")
+
+    # Size of cube
+    N0, N1, N2 = data.shape
+
+    # Coords for full cube
+    x0_range = ls(N0)
+    x1_range = ls(N1)
+    x2_range = ls(N2)
+
+    # Coords for subsampled cube
+    pred_points = (x0_range[::subsampl], x1_range[::subsampl], x2_range[::subsampl])
+
+    # Select slice
+    if slice == "full":
+        class_cube = data[::subsampl, ::subsampl, ::subsampl] * 0
+
+    elif slice == "inline":
+        slice_no = slice_no - data_info["inline_start"]
+        class_cube = data[::subsampl, 0:1, ::subsampl] * 0
+        x1_range = np.array([slice_no])
+        pred_points = (pred_points[0], pred_points[2])
+
+    elif slice == "crossline":
+        slice_no = slice_no - data_info["crossline_start"]
+        class_cube = data[::subsampl, ::subsampl, 0:1] * 0
+        x2_range = np.array([slice_no])
+        pred_points = (pred_points[0], pred_points[1])
+
+    elif slice == "timeslice":
+        slice_no = slice_no - data_info["timeslice_start"]
+        class_cube = data[0:1, ::subsampl, ::subsampl] * 0
+        x0_range = np.array([slice_no])
+        pred_points = (pred_points[1], pred_points[2])
+
+    # Grid for small class slice/cube
+    n0, n1, n2 = class_cube.shape
+    x0_grid, x1_grid, x2_grid = np.meshgrid(ls(n0), ls(n1), ls(n2), indexing="ij")
+
+    # Grid for full slice/cube
+    X0_grid, X1_grid, X2_grid = np.meshgrid(x0_range, x1_range, x2_range, indexing="ij")
+
+    # Indexes for large cube at small cube pixels
+    X0_grid_sub = X0_grid[::subsampl, ::subsampl, ::subsampl]
+    X1_grid_sub = X1_grid[::subsampl, ::subsampl, ::subsampl]
+    X2_grid_sub = X2_grid[::subsampl, ::subsampl, ::subsampl]
+
+    # Get half window size
+    w = im_size // 2
+
+    # Loop through center pixels in output cube
+    for i in range(X0_grid_sub.size):
+
+        # Get coordinates in small and large cube
+        x0 = x0_grid.ravel()[i]
+        x1 = x1_grid.ravel()[i]
+        x2 = x2_grid.ravel()[i]
+
+        X0 = X0_grid_sub.ravel()[i]
+        X1 = X1_grid_sub.ravel()[i]
+        X2 = X2_grid_sub.ravel()[i]
+
+        # Only compute when a full 65x65x65 cube can be extracted around center pixel
+        if X0 > w and X1 > w and X2 > w and X0 < N0 - w + 1 and X1 < N1 - w + 1 and X2 < N2 - w + 1:
+
+            # Get mini-cube around center pixel
+            mini_cube = data[X0 - w : X0 + w + 1, X1 - w : X1 + w + 1, X2 - w : X2 + w + 1]
+
+            # Get predicted "probabilities"
+            mini_cube = Variable(torch.FloatTensor(mini_cube[np.newaxis, np.newaxis, :, :, :]))
+            if use_gpu:
+                mini_cube = mini_cube.cuda()
+            out = network(mini_cube)
+            out = out.data.cpu().numpy()
+
+            out = out[:, :, out.shape[2] // 2, out.shape[3] // 2, out.shape[4] // 2]
+            out = np.squeeze(out)
+
+            # Make one output per output channel
+            if not isinstance(class_cube, list):
+                class_cube = np.split(np.repeat(class_cube[:, :, :, np.newaxis], out.size, 3), out.size, axis=3,)
+
+            # Insert into output
+            if out.size == 1:
+                class_cube[0][x0, x1, x2] = out
+            else:
+                # use a separate loop variable so the outer loop index is not clobbered
+                for channel in range(out.size):
+                    class_cube[channel][x0, x1, x2] = out[channel]
+
+            # Keep user informed about progress
+            if slice == "full":
+                print_progress_bar(i, x0_grid.size)
+
+    # Resize to input size
+    if return_full_size:
+        if slice == "full":
+            print("Interpolating down sampled results to fit input cube")
+
+        N = X0_grid.size
+
+        # Output grid
+        if slice == "full":
+            grid_output_cube = np.concatenate(
+                [X0_grid.reshape([N, 1]), X1_grid.reshape([N, 1]), X2_grid.reshape([N, 1]),], 1,
+            )
+        elif slice == "inline":
"inline": + grid_output_cube = np.concatenate([X0_grid.reshape([N, 1]), X2_grid.reshape([N, 1])], 1) + elif slice == "crossline": + grid_output_cube = np.concatenate([X0_grid.reshape([N, 1]), X1_grid.reshape([N, 1])], 1) + elif slice == "timeslice": + grid_output_cube = np.concatenate([X1_grid.reshape([N, 1]), X2_grid.reshape([N, 1])], 1) + + # Interpolation + for i in range(len(class_cube)): + is_int = ( + np.sum( + np.unique(class_cube[i]).astype("float") - np.unique(class_cube[i]).astype("int32").astype("float") + ) + == 0 + ) + class_cube[i] = interpn( + pred_points, + class_cube[i].astype("float").squeeze(), + grid_output_cube, + method="linear", + fill_value=0, + bounds_error=False, + ) + class_cube[i] = class_cube[i].reshape([x0_range.size, x1_range.size, x2_range.size]) + + # If ouput is class labels we convert the interpolated array to ints + if is_int: + class_cube[i] = class_cube[i].astype("int32") + + if slice == "full": + print("Finished interpolating") + + # Squeeze outputs + for i in range(len(class_cube)): + class_cube[i] = class_cube[i].squeeze() + + return class_cube + + +# TODO: this should probably be replaced with TQDM +def print_progress_bar(iteration, total, prefix="", suffix="", decimals=1, length=100, fill="="): + """ + Privides a progress bar implementation. + + Adapted from https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console/14879561#14879561 + + Args: + iteration: iteration number + total: total number of iterations + prefix: comment prefix in display + suffix: comment suffix in display + decimals: how many decimals to display + length: character length of progress bar + fill: character to display as progress bar + + """ + global ST, LAST_UPDATE + + # Expect itteration to go from 0 to N-1 + iteration = iteration + 1 + + # Only update every 5 second + if time.time() - LAST_UPDATE < 5: + if iteration == total: + time.sleep(1) + else: + return + + if iteration <= 1: + st = time.time() + exp_h = "" + exp_m = "" + exp_s = "" + elif iteration == total: + exp_time = time.time() - ST + exp_h = int(exp_time / 3600) + exp_m = int(exp_time / 60 - exp_h * 60.0) + exp_s = int(exp_time - exp_m * 60.0 - exp_h * 3600.0) + else: + exp_time = (time.time() - ST) / (iteration - 1) * total - (time.time() - ST) + exp_h = int(exp_time / 3600) + exp_m = int(exp_time / 60 - exp_h * 60.0) + exp_s = int(exp_time - exp_m * 60.0 - exp_h * 3600.0) + + percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total))) + filled_length = int(length * iteration // total) + bar = fill * filled_length + "-" * (length - filled_length) + if iteration != total: + print("\r%s |%s| %s%% %s - %sh %smin %ss left" % (prefix, bar, percent, suffix, exp_h, exp_m, exp_s)) + else: + print("\r%s |%s| %s%% %s - %sh %smin %ss " % (prefix, bar, percent, suffix, exp_h, exp_m, exp_s)) + sys.stdout.write("\033[F") + # Print New Line on Complete + if iteration == total: + print("") + # last_update = time.time() + + +# TODO: rewrite this whole function to get rid of excepts +# TODO: also not sure what this function is for - it's almost as if it's not needed - try to remove it. 
+def gpu_no_of_var(var):
+    """
+    Return the GPU device number a tensor or model lives on, or False if it is on the CPU
+
+    Args:
+        var: torch tensor or module
+
+    Returns:
+        The CUDA device number that the torch tensor is on, or False if the tensor is not on a GPU
+
+    """
+
+    try:
+        is_cuda = next(var.parameters()).is_cuda
+    except AttributeError:
+        is_cuda = var.is_cuda
+
+    if is_cuda:
+        try:
+            return next(var.parameters()).get_device()
+        except AttributeError:
+            return var.get_device()
+    else:
+        return False
+
+
+# TODO: remove all the try except statements
+def var_to_np(var):
+    """
+    Take a pyTorch tensor and convert it to a numpy array of the same shape, as the name suggests.
+
+    Args:
+        var: input variable
+
+    Returns:
+        numpy array of the tensor
+
+    """
+    if isinstance(var, np.ndarray):
+        return var
+
+    # If input is list we do this for all elements
+    if type(var) == type([]):
+        out = []
+        for v in var:
+            out.append(var_to_np(v))
+        return out
+
+    try:
+        var = var.cpu()
+    except AttributeError:
+        pass
+    try:
+        var = var.data
+    except AttributeError:
+        pass
+    try:
+        var = var.numpy()
+    except (AttributeError, RuntimeError):
+        pass
+
+    if type(var) == tuple:
+        var = var[0]
+    return var
+
+
+def compute_accuracy(predicted_class, labels):
+    """
+    Compute the accuracy performance metric
+
+    Args:
+        predicted_class: pyTorch tensor with predictions
+        labels: pyTorch tensor with ground truth labels
+
+    Returns:
+        Accuracy as a dictionary: per-class accuracy plus the average class accuracy across classes
+
+    """
+    labels = var_to_np(labels)
+    predicted_class = var_to_np(predicted_class)
+
+    accuracies = {}
+    for cls in np.unique(labels):
+        if cls >= 0:
+            accuracies["accuracy_class_" + str(cls)] = int(np.mean(predicted_class[labels == cls] == cls) * 100)
+    accuracies["average_class_accuracy"] = np.mean([acc for acc in accuracies.values()])
+    return accuracies
diff --git a/contrib/fwi/azureml_devito/README.md b/contrib/fwi/azureml_devito/README.md
new file mode 100755
index 00000000..80b11e85
--- /dev/null
+++ b/contrib/fwi/azureml_devito/README.md
@@ -0,0 +1,60 @@
+# DeepSeismic
+
+## Imaging
+
+This tutorial shows how to run [devito](https://www.devitoproject.org/) tutorial [notebooks](https://github.com/opesci/devito/tree/master/examples/seismic/tutorials) in Azure Machine Learning ([Azure ML](https://docs.microsoft.com/en-us/azure/machine-learning/)) using the [Azure Machine Learning Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/service/tutorial-1st-experiment-sdk-setup).
+
+For the best experience, use a Linux (Ubuntu) Azure [DSVM](https://docs.microsoft.com/en-us/azure/machine-learning/data-science-virtual-machine/dsvm-ubuntu-intro) and Jupyter Notebook with the AzureML Python SDK and the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) to run the notebooks (see the __Setting up Environment__ section below).
+
+Devito is a domain-specific language (DSL) and code generation framework for the design of highly optimized finite difference kernels via symbolic computation, for use in inversion methods. Here we show how ```devito``` can be used in the cloud by leveraging the AzureML experimentation framework as a transparent and scalable platform for generic computation workloads. We focus on full-waveform inversion (__FWI__) problems, where non-linear data-fitting procedures are applied to compute estimates of subsurface properties from seismic data.
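+
+To give a flavor of the DSL, the short sketch below (not part of the devito tutorials; it assumes only devito's public `Grid`/`TimeFunction`/`Eq`/`Operator` API, a toy 2D grid and unit velocity) shows how a wave-equation update is expressed symbolically, with devito generating the optimized stencil code under the hood:
+
+```python
+from devito import Grid, TimeFunction, Eq, Operator, solve
+
+# toy 2D grid and a wavefield with 2nd-order time / 4th-order space derivatives
+grid = Grid(shape=(101, 101), extent=(1000., 1000.))
+u = TimeFunction(name="u", grid=grid, time_order=2, space_order=4)
+
+# acoustic wave equation with unit velocity; solve symbolically for u at t+dt
+pde = u.dt2 - u.laplace
+op = Operator([Eq(u.forward, solve(pde, u.forward))])
+
+op.apply(time=100, dt=0.5)  # run 100 time steps of the generated kernel
+```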
+
+
+### Setting up Environment
+
+The [conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/concepts/environments.html) that encapsulates all the dependencies needed to run the notebooks described above can be created using the fwi_dev_conda_environment.yml file. See [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/NBSETUP.md) for generic instructions on how to install and run the AzureML Python SDK in Jupyter Notebooks.
+
+To create the conda environment, run:
+```
+conda env create -f fwi_dev_conda_environment.yml
+
+```
+
+then, one can see the created environment within the list of available environments and export it as a .yml file:
+```
+conda env list
+conda env export --name fwi_dev_conda_environment -f ./contrib/fwi/azureml_devito/fwi_dev_conda_environment_exported.yml
+
+```
+The created conda environment needs to be activated, followed by the installation of its corresponding IPython kernel:
+```
+conda activate fwi_dev_conda_environment
+python -m ipykernel install --user --name fwi_dev_conda_environment --display-name "fwi_dev_conda_environment Python"
+```
+
+Finally, start Jupyter notebook from within the activated environment:
+```
+jupyter notebook
+```
+One can then choose the __fwi_dev_conda_environment Python__ kernel defined above either when a notebook is opened for the first time, or by using the "Kernel/Change kernel" notebook menu.
+
+
+
+The [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) is also used to create an ACR in notebook 000_Setup_GeophysicsTutorial_FWI_Azure_devito, and then to push and pull docker images. One can also create the ACR via the Azure [portal](https://azure.microsoft.com/).
+
+### Run devito in Azure
+The devito fwi examples are run in AzureML using 4 notebooks:
+ - ```000_Setup_GeophysicsTutorial_FWI_Azure_devito.ipynb```: sets up Azure resources (like resource groups, an AzureML [workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-workspace), and an Azure (docker) [container registry](https://azure.microsoft.com/en-us/services/container-registry/)).
+ - ```010_CreateExperimentationDockerImage_GeophysicsTutorial_FWI_Azure_devito.ipynb```: creates a custom docker file and the associated image that contains the ```devito``` [github repository](https://github.com/opesci/devito.git) (including the devito fwi tutorial [notebooks](https://github.com/opesci/devito/tree/master/examples/seismic/tutorials)) and runs the official devito install [tests](https://github.com/opesci/devito/tree/master/tests).
+ - ```020_UseAzureMLEstimatorForExperimentation_GeophysicsTutorial_FWI_Azure_devito.ipynb```: shows how the devito fwi tutorial [notebooks](https://github.com/opesci/devito/tree/master/examples/seismic/tutorials) can be run in AzureML using Azure Machine Learning [generic](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.estimator?view=azure-ml-py) [estimators](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-train-ml-models) with custom docker images. FWI computation takes place on a managed AzureML [remote compute cluster](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets).
+ 
+ ```Devito``` fwi computation artifacts (images and notebooks with data processing output results) are tracked under the AzureML workspace, and can be later downloaded and visualized; a rough sketch of the job submission pattern is shown below.
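+ 
+ The sketch (hedged: the experiment, cluster and registry names are placeholders, and `run_tutorial.py` is a hypothetical driver script, not a file from this repo) outlines the AzureML Estimator submission used in notebook 020:
+ 
+ ```python
+ from azureml.core import Workspace, Experiment
+ from azureml.train.estimator import Estimator
+ 
+ ws = Workspace.from_config()                      # workspace created in notebook 000
+ exp = Experiment(ws, "fwi-devito")                # illustrative experiment name
+ 
+ est = Estimator(
+     source_directory="./src",                     # assumed script location
+     entry_script="run_tutorial.py",               # hypothetical driver script
+     compute_target="gpu-cluster",                 # AmlCompute cluster name (placeholder)
+     custom_docker_image="myacr.azurecr.io/devito:latest",  # image built in notebook 010
+     user_managed=True,                            # dependencies come from the image itself
+ )
+ run = exp.submit(est)
+ run.wait_for_completion(show_output=True)
+ ```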
+ 
+ Two ways of running devito code are shown:
+ 
+ (1) using __custom code__ (slightly modified graphing functions that save images to files). The AzureML experimentation job is defined by the devito code packaged as a py file. The experimentation job (defined by the [azureml.core.experiment.Experiment](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.experiment.experiment?view=azure-ml-py) class) can be used to track metrics or other artifacts (images) that are available in the Azure portal.
+ 
+ (2) using [__papermill__](https://github.com/nteract/papermill) invoked via its Python API to run unedited devito demo notebooks (including the [dask](https://dask.org/) local cluster [example](https://github.com/opesci/devito/blob/master/examples/seismic/tutorials/04_dask.ipynb)) on the remote compute target, with the results saved as notebooks that are available in the Azure portal.
+ 
+ - ```030_ScaleJobsUsingAzuremL_GeophysicsTutorial_FWI_Azure_devito.ipynb```: shows how the devito fwi tutorial notebooks can be run in parallel on the elastically allocated AzureML [remote compute cluster](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets) created before. By submitting multiple jobs via azureml.core.Experiment submit(azureml.train.estimator.Estimator), one can use the [portal](https://portal.azure.com) to visualize the elastic allocation of AzureML [remote compute cluster](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets) nodes.
+ 
+
diff --git a/contrib/fwi/azureml_devito/fwi_dev_conda_environment.yml b/contrib/fwi/azureml_devito/fwi_dev_conda_environment.yml
new file mode 100755
index 00000000..d3757663
--- /dev/null
+++ b/contrib/fwi/azureml_devito/fwi_dev_conda_environment.yml
@@ -0,0 +1,17 @@
+
+name: fwi_dev_conda_environment
+
+channels:
+  - anaconda
+dependencies:
+  - python=3.7
+  - numpy
+  - notebook
+  - ipykernel #nb_conda
+  - scikit-learn
+  - pip
+  - pip:
+    - python-dotenv
+    - papermill[azure]
+    - azureml-sdk[notebooks,automl,explain]==1.0.76
+    - docker
diff --git a/contrib/fwi/azureml_devito/fwi_dev_conda_environment_exported.yml b/contrib/fwi/azureml_devito/fwi_dev_conda_environment_exported.yml
new file mode 100644
index 00000000..ac0566d3
--- /dev/null
+++ b/contrib/fwi/azureml_devito/fwi_dev_conda_environment_exported.yml
@@ -0,0 +1,211 @@
+name: fwi_dev_conda_environment
+channels:
+  - anaconda
+  - defaults
+dependencies:
+  - attrs=19.3.0=py_0
+  - backcall=0.1.0=py37_0
+  - blas=1.0=mkl
+  - bleach=3.1.0=py37_0
+  - ca-certificates=2019.11.27=0
+  - certifi=2019.11.28=py37_0
+  - decorator=4.4.1=py_0
+  - defusedxml=0.6.0=py_0
+  - entrypoints=0.3=py37_0
+  - gmp=6.1.2=hb3b607b_0
+  - importlib_metadata=1.1.0=py37_0
+  - intel-openmp=2019.5=281
+  - ipykernel=5.1.3=py37h39e3cac_0
+  - ipython=7.10.1=py37h39e3cac_0
+  - ipython_genutils=0.2.0=py37_0
+  - jedi=0.15.1=py37_0
+  - jinja2=2.10.3=py_0
+  - joblib=0.14.0=py_0
+  - jsonschema=3.2.0=py37_0
+  - jupyter_client=5.3.4=py37_0
+  - jupyter_core=4.6.1=py37_0
+  - libedit=3.1.20181209=hc058e9b_0
+  - libffi=3.2.1=h4deb6c0_3
+  - libgcc-ng=9.1.0=hdf63c60_0
+  - libgfortran-ng=7.3.0=hdf63c60_0
+  - libsodium=1.0.16=h1bed415_0
+  - libstdcxx-ng=9.1.0=hdf63c60_0
+  - markupsafe=1.1.1=py37h7b6447c_0
+  - mistune=0.8.4=py37h7b6447c_0
+  - mkl=2019.5=281
+  - mkl-service=2.3.0=py37he904b0f_0
+  - mkl_fft=1.0.15=py37ha843d7b_0
+  - mkl_random=1.1.0=py37hd6b4f25_0
+  - more-itertools=7.2.0=py37_0
+  - nbconvert=5.6.1=py37_0
+  - nbformat=4.4.0=py37_0
+  - ncurses=6.1=he6710b0_1
+ 
- notebook=6.0.2=py37_0 + - openssl=1.1.1=h7b6447c_0 + - pandoc=2.2.3.2=0 + - pandocfilters=1.4.2=py37_1 + - parso=0.5.1=py_0 + - pexpect=4.7.0=py37_0 + - pickleshare=0.7.5=py37_0 + - pip=19.3.1=py37_0 + - prometheus_client=0.7.1=py_0 + - prompt_toolkit=3.0.2=py_0 + - ptyprocess=0.6.0=py37_0 + - pygments=2.5.2=py_0 + - pyrsistent=0.15.6=py37h7b6447c_0 + - python=3.7.5=h0371630_0 + - python-dateutil=2.8.1=py_0 + - pyzmq=18.1.0=py37he6710b0_0 + - readline=7.0=h7b6447c_5 + - send2trash=1.5.0=py37_0 + - setuptools=42.0.2=py37_0 + - six=1.13.0=py37_0 + - sqlite=3.30.1=h7b6447c_0 + - terminado=0.8.3=py37_0 + - testpath=0.4.4=py_0 + - tk=8.6.8=hbc83047_0 + - tornado=6.0.3=py37h7b6447c_0 + - traitlets=4.3.3=py37_0 + - wcwidth=0.1.7=py37_0 + - webencodings=0.5.1=py37_1 + - xz=5.2.4=h14c3975_4 + - zeromq=4.3.1=he6710b0_3 + - zipp=0.6.0=py_0 + - zlib=1.2.11=h7b6447c_3 + - pip: + - adal==1.2.2 + - ansiwrap==0.8.4 + - applicationinsights==0.11.9 + - azure-common==1.1.23 + - azure-core==1.1.1 + - azure-datalake-store==0.0.48 + - azure-graphrbac==0.61.1 + - azure-mgmt-authorization==0.60.0 + - azure-mgmt-containerregistry==2.8.0 + - azure-mgmt-keyvault==2.0.0 + - azure-mgmt-resource==7.0.0 + - azure-mgmt-storage==7.0.0 + - azure-storage-blob==12.1.0 + - azureml-automl-core==1.0.76 + - azureml-automl-runtime==1.0.76.1 + - azureml-contrib-notebook==1.0.76 + - azureml-core==1.0.76 + - azureml-dataprep==1.1.33 + - azureml-dataprep-native==13.1.0 + - azureml-defaults==1.0.76 + - azureml-explain-model==1.0.76 + - azureml-interpret==1.0.76 + - azureml-model-management-sdk==1.0.1b6.post1 + - azureml-pipeline==1.0.76 + - azureml-pipeline-core==1.0.76 + - azureml-pipeline-steps==1.0.76 + - azureml-sdk==1.0.76 + - azureml-telemetry==1.0.76 + - azureml-train==1.0.76 + - azureml-train-automl==1.0.76 + - azureml-train-automl-client==1.0.76 + - azureml-train-automl-runtime==1.0.76.1 + - azureml-train-core==1.0.76 + - azureml-train-restclients-hyperdrive==1.0.76 + - azureml-widgets==1.0.76 + - backports-tempfile==1.0 + - backports-weakref==1.0.post1 + - boto==2.49.0 + - boto3==1.10.37 + - botocore==1.13.37 + - cffi==1.13.2 + - chardet==3.0.4 + - click==7.0 + - cloudpickle==1.2.2 + - configparser==3.7.4 + - contextlib2==0.6.0.post1 + - cryptography==2.8 + - cycler==0.10.0 + - cython==0.29.14 + - dill==0.3.1.1 + - distro==1.4.0 + - docker==4.1.0 + - docutils==0.15.2 + - dotnetcore2==2.1.11 + - fire==0.2.1 + - flake8==3.7.9 + - flask==1.0.3 + - fusepy==3.0.1 + - future==0.18.2 + - gensim==3.8.1 + - gunicorn==19.9.0 + - idna==2.8 + - imageio==2.6.1 + - interpret-community==0.2.3 + - interpret-core==0.1.19 + - ipywidgets==7.5.1 + - isodate==0.6.0 + - itsdangerous==1.1.0 + - jeepney==0.4.1 + - jmespath==0.9.4 + - json-logging-py==0.2 + - jsonform==0.0.2 + - jsonpickle==1.2 + - jsonsir==0.0.2 + - keras2onnx==1.6.0 + - kiwisolver==1.1.0 + - liac-arff==2.4.0 + - lightgbm==2.3.0 + - matplotlib==3.1.2 + - mccabe==0.6.1 + - msrest==0.6.10 + - msrestazure==0.6.2 + - ndg-httpsclient==0.5.1 + - networkx==2.4 + - nimbusml==1.6.1 + - numpy==1.16.2 + - oauthlib==3.1.0 + - onnx==1.6.0 + - onnxconverter-common==1.6.0 + - onnxmltools==1.4.1 + - packaging==19.2 + - pandas==0.23.4 + - papermill==1.2.1 + - pathspec==0.6.0 + - patsy==0.5.1 + - pillow==6.2.1 + - pmdarima==1.1.1 + - protobuf==3.11.1 + - psutil==5.6.7 + - pyasn1==0.4.8 + - pycodestyle==2.5.0 + - pycparser==2.19 + - pyflakes==2.1.1 + - pyjwt==1.7.1 + - pyopenssl==19.1.0 + - pyparsing==2.4.5 + - python-dotenv==0.10.3 + - python-easyconfig==0.1.7 + - pytz==2019.3 + - 
pywavelets==1.1.1 + - pyyaml==5.2 + - requests==2.22.0 + - requests-oauthlib==1.3.0 + - resource==0.2.1 + - ruamel-yaml==0.15.89 + - s3transfer==0.2.1 + - scikit-image==0.16.2 + - scikit-learn==0.20.3 + - scipy==1.1.0 + - secretstorage==3.1.1 + - shap==0.29.3 + - skl2onnx==1.4.9 + - sklearn-pandas==1.7.0 + - smart-open==1.9.0 + - statsmodels==0.10.2 + - tenacity==6.0.0 + - termcolor==1.1.0 + - textwrap3==0.9.2 + - tqdm==4.40.2 + - typing-extensions==3.7.4.1 + - urllib3==1.25.7 + - websocket-client==0.56.0 + - werkzeug==0.16.0 + - wheel==0.30.0 + - widgetsnbextension==3.5.1 +prefix: /data/anaconda/envs/fwi_dev_conda_environment diff --git a/contrib/fwi/azureml_devito/notebooks/000_Setup_GeophysicsTutorial_FWI_Azure_devito.ipynb b/contrib/fwi/azureml_devito/notebooks/000_Setup_GeophysicsTutorial_FWI_Azure_devito.ipynb new file mode 100755 index 00000000..c9a17f1e --- /dev/null +++ b/contrib/fwi/azureml_devito/notebooks/000_Setup_GeophysicsTutorial_FWI_Azure_devito.ipynb @@ -0,0 +1,923 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Copyright (c) Microsoft Corporation. \n", + "Licensed under the MIT License." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# FWI in Azure project\n", + "\n", + "## Set-up AzureML resources\n", + "\n", + "This project ports devito (https://github.com/opesci/devito) into Azure and runs tutorial notebooks at:\n", + "https://nbviewer.jupyter.org/github/opesci/devito/blob/master/examples/seismic/tutorials/\n", + "\n", + "\n", + "\n", + "In this notebook we setup AzureML resources. This notebook should be run once and will enable all subsequent notebooks.\n", + "\n", + "\n", + "User input requiring steps:\n", + " - [Fill in and save sensitive information](#dot_env_description)\n", + " - [Azure login](#Azure_login) (may be required first time the notebook is run) \n", + " - [Set __create_ACR_FLAG__ to true to trigger ACR creation and to save of ACR login info](#set_create_ACR_flag)\n", + " - [Azure CLI login ](#Azure_cli_login) (may be required once to create an [ACR](https://azure.microsoft.com/en-us/services/container-registry/)) \n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# Allow multiple displays per cell\n", + "from IPython.core.interactiveshell import InteractiveShell\n", + "InteractiveShell.ast_node_interactivity = \"all\" " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Azure Machine Learning and Pipeline SDK-specific imports" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import sys, os\n", + "import shutil\n", + "import urllib\n", + "import azureml.core\n", + "from azureml.core import Workspace, Experiment\n", + "from azureml.core.compute import ComputeTarget, AmlCompute\n", + "from azureml.core.compute_target import ComputeTargetException\n", + "import platform, dotenv\n", + "import pathlib" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Azure ML SDK Version: 1.0.76\n" + ] + }, + { + "data": { + "text/plain": [ + "'Linux-4.15.0-1064-azure-x86_64-with-debian-stretch-sid'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'/datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks'" + ] + }, + "execution_count": 3, + "metadata": 
{}, + "output_type": "execute_result" + } + ], + "source": [ + "print(\"Azure ML SDK Version: \", azureml.core.VERSION)\n", + "platform.platform()\n", + "os.getcwd()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 1. Create utilities file\n", + "\n", + "##### 1.1 Define utilities file (project_utils.py) path\n", + "Utilities file created here has code for Azure resources access authorization, project configuration settings like directories and file names in __project_consts__ class." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "utils_file_name = 'project_utils'\n", + "auxiliary_files_dir = os.path.join(*(['.', 'src']))\n", + "\n", + "\n", + "utils_path_name = os.path.join(os.getcwd(), auxiliary_files_dir)\n", + "utils_full_name = os.path.join(utils_path_name, os.path.join(*([utils_file_name+'.py'])))\n", + "os.makedirs(utils_path_name, exist_ok=True)\n", + " \n", + "def ls_l(a_dir):\n", + " return ([f for f in os.listdir(a_dir) if os.path.isfile(os.path.join(a_dir, f))]) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### 1.2. Edit/create project_utils.py file" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting /datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks/./src/project_utils.py\n" + ] + } + ], + "source": [ + "%%writefile $utils_full_name\n", + "\n", + "from azureml.core.authentication import ServicePrincipalAuthentication\n", + "from azureml.core.authentication import AzureCliAuthentication\n", + "from azureml.core.authentication import InteractiveLoginAuthentication\n", + "from azureml.core.authentication import AuthenticationException\n", + "import dotenv, logging, pathlib, os\n", + "\n", + "\n", + "# credit Mathew Salvaris\n", + "def get_auth(env_path):\n", + " \"\"\"Tries to get authorization info by first trying to get Service Principal info, then CLI, then interactive. 
\n", + " \"\"\"\n", + " logger = logging.getLogger(__name__)\n", + " crt_sp_pwd = os.environ.get(\"SP_PASSWORD\", None)\n", + " if crt_sp_pwd:\n", + " logger.debug(\"Trying to create Workspace with Service Principal\")\n", + " aml_sp_password = crt_sp_pwd\n", + " aml_sp_tennant_id = dotenv.get_key(env_path, 'SP_TENANT_ID')\n", + " aml_sp_username = dotenv.get_key(env_path, 'SP_APPLICATION_ID')\n", + " auth = ServicePrincipalAuthentication(\n", + " tenant_id=aml_sp_tennant_id,\n", + " username=aml_sp_username,\n", + " password=aml_sp_password,\n", + " )\n", + " else:\n", + " logger.debug(\"Trying to create Workspace with CLI Authentication\")\n", + " try:\n", + " auth = AzureCliAuthentication()\n", + " auth.get_authentication_header()\n", + " except AuthenticationException:\n", + " logger.debug(\"Trying to create Workspace with Interactive login\")\n", + " auth = InteractiveLoginAuthentication()\n", + "\n", + " return auth \n", + "\n", + "\n", + "def set_dotenv_info(dotenv_file_path, env_dict):\n", + " \"\"\"Use dict loop to set multiple keys in dotenv file.\n", + " Minimal file error management.\n", + " \"\"\"\n", + " logger = logging.getLogger(__name__)\n", + " if bool(env_dict):\n", + " dotenv_file = pathlib.Path(dotenv_file_path)\n", + " if not dotenv_file.is_file():\n", + " logger.debug('dotenv file not found, will create \"{}\" using the sensitive info you provided.'.format(dotenv_file_path))\n", + " dotenv_file.touch()\n", + " else:\n", + " logger.debug('dotenv file \"{}\" found, will (over)write it with current sensitive info you provided.'.format(dotenv_file_path))\n", + " \n", + " for crt_key, crt_val in env_dict.items():\n", + " dotenv.set_key(dotenv_file_path, crt_key, crt_val)\n", + "\n", + " else:\n", + " logger.debug(\\\n", + " 'Trying to save empty env_dict variable into {}, please set your sensitive info in a dictionary.'\\\n", + " .format(dotenv_file_path)) \n", + " \n", + "\n", + "class project_consts(object):\n", + " \"\"\"Keep project's file names and directory structure in one place.\n", + " Minimal setattr error management.\n", + " \"\"\"\n", + " \n", + " AML_WORKSPACE_CONFIG_DIR = ['.', '..', 'not_shared']\n", + " AML_EXPERIMENT_DIR = ['.', '..', 'temp']\n", + " AML_WORKSPACE_CONFIG_FILE_NAME = 'aml_ws_config.json'\n", + " DOTENV_FILE_PATH = AML_WORKSPACE_CONFIG_DIR + ['general.env'] \n", + " DOCKER_DOTENV_FILE_PATH = AML_WORKSPACE_CONFIG_DIR + ['dockerhub.env'] \n", + "\n", + " def __setattr__(self, *_):\n", + " raise TypeError\n", + "\n", + " \n", + "if __name__==\"__main__\":\n", + " \"\"\"Basic function/class tests.\n", + " \"\"\"\n", + " import sys, os\n", + " prj_consts = project_consts()\n", + " logger = logging.getLogger(__name__)\n", + " logging.basicConfig(level=logging.DEBUG) # Logging Levels: DEBUG\t10, NOTSET\t0\n", + " logger.debug('AML ws file = {}'.format(os.path.join(*([os.path.join(*(prj_consts.AML_WORKSPACE_CONFIG_DIR)),\n", + " prj_consts.AML_WORKSPACE_CONFIG_FILE_NAME]))))\n", + "\n", + " crt_dotenv_file_path = os.path.join(*(prj_consts.DOTENV_FILE_PATH))\n", + " set_dotenv_info(crt_dotenv_file_path, {})\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### 1.3. 
Import the utility functions defined above" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[None]" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def add_path_to_sys_path(path_to_append):\n", + " if not (any(path_to_append in paths for paths in sys.path)):\n", + " sys.path.append(path_to_append)\n", + " \n", + "paths_to_append = [os.path.join(os.getcwd(), auxiliary_files_dir)]\n", + "[add_path_to_sys_path(crt_path) for crt_path in paths_to_append]\n", + "\n", + "\n", + "import project_utils\n", + "prj_consts = project_utils.project_consts()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 2. Set-up the AML SDK infrastructure\n", + "\n", + "* Create Azure resource group (rsg), workspaces, \n", + "* save sensitive info using [python-dotenv](https://github.com/theskumar/python-dotenv) \n", + " \n", + "Notebook repeatability notes:\n", + "* The notebook tries to find and use an existing Azure resource group (rsg) defined by __crt_resource_group__. It creates a new one if needed. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "##### Create an [ACR](https://azure.microsoft.com/en-us/services/container-registry/) the first time this notebook is run. \n", + "Either docker hub or ACR can be used to store the experimentation image. To create the ACR, set: \n", + "```\n", + "create_ACR_FLAG=True \n", + "```\n", + "It will create an ACR by running several steps described below in section 2.7. __Create an [ACR]__ \n", + " \n", + " \n", + "[Back](#user_input_requiring_steps) to summary of user input requiring steps." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "create_ACR_FLAG = False #True False" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "sensitive_info = {}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "##### 2.1. Input here sensitive and configuration information\n", + "[dotenv](https://github.com/theskumar/python-dotenv) is used to hide sensitive info, like the Azure subscription name/ID. The serialized info needs to be manually input once. \n", + " \n", + "* REQUIRED ACTION for the 2 cells below: uncomment them, add the required info in the first cell below, run both cells once. \n", + " The sensitive information will be packed in the __sensitive_info__ dictionary variable, which will then be saved in a following cell in an .env file (__dotenv_file_path__) that should likely be git ignored. \n", + "\n", + "* OPTIONAL STEP: After running the two cells below once to save the __sensitive_info__ dictionary variable with your custom info, you can comment them out and leave the __sensitive_info__ variable defined above as an empty python dictionary. \n", + " \n", + " \n", + "__Notes__:\n", + "* An empty __sensitive_info__ dictionary is ignored by the __set_dotenv_info__ function defined above in project_utils.py.\n", + "* The saved .env file will be used thereafter in each cell that starts with %dotenv. \n", + "* The saved .env file contains user-specific information and it should __not__ be version-controlled in git.\n", + "* If you would like to [use service principal authentication](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/manage-azureml-service/authentication-in-azureml/authentication-in-azure-ml.ipynb) make sure you provide the optional values as well (see the get_auth function definition in the project_utils.py file created above for details).\n", + "\n", + "[Back](#user_input_requiring_steps) to summary of user input requiring steps." + ] + },
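As an aside, here is a minimal sketch of the python-dotenv round-trip these notebooks rely on: set_key writes one key to the file, load_dotenv exports it into the process environment. The file name demo.env is hypothetical, not part of the project:

```
# Minimal python-dotenv round-trip sketch; "demo.env" is a hypothetical file name
import os, pathlib
import dotenv

env_path = "demo.env"
pathlib.Path(env_path).touch()  # dotenv.set_key expects an existing file
dotenv.set_key(env_path, "SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000")
dotenv.load_dotenv(env_path)    # exports the saved keys into os.environ
print(os.getenv("SUBSCRIPTION_ID"))
```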
\n", + "* The saved .env file contains user specific information and it shoulld __not__ be version-controlled in git.\n", + "* If you would like to [use service principal authentication](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/manage-azureml-service/authentication-in-azureml/authentication-in-azure-ml.ipynb) make sure you provide the optional values as well (see get_auth function definition in project_utils.py file created above for details).\n", + "\n", + "[Back](#user_input_requiring_steps) to summary of user input requiring steps." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "# subscription_id = \"\"\n", + "# resource_group = \"ghiordanfwirsg01\"\n", + "# workspace_name = \"ghiordanfwiws\"\n", + "# workspace_region = \"eastus2\"\n", + "# gpu_cluster_name = \"gpuclstfwi02\"\n", + "# gpucluster_admin_user_name = \"\"\n", + "# gpucluster_admin_user_password = \"\"\n", + "\n", + "# experimentation_docker_image_name = \"fwi01_azureml\"\n", + "# experimentation_docker_image_tag = \"sdk.v1.0.60\"\n", + "# docker_container_mount_point = os.getcwd() # use project directory or a subdirectory\n", + "\n", + "# docker_login = \"georgedockeraccount\"\n", + "# docker_pwd = \"\"\n", + "\n", + "# acr_name=\"fwi01acr\"" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "# sensitive_info = {\n", + "# 'SUBSCRIPTION_ID':subscription_id,\n", + "# 'RESOURCE_GROUP':resource_group, \n", + "# 'WORKSPACE_NAME':workspace_name, \n", + "# 'WORKSPACE_REGION':workspace_region,\n", + "# 'GPU_CLUSTER_NAME':gpu_cluster_name,\n", + "# 'GPU_CLUSTER_ADMIN_USER_NAME':gpucluster_admin_user_name,\n", + "# 'GPU_CLUSTER_ADMIN_USER_PASSWORD':gpucluster_admin_user_password,\n", + "# 'EXPERIMENTATION_DOCKER_IMAGE_NAME':experimentation_docker_image_name,\n", + "# 'EXPERIMENTATION_DOCKER_IMAGE_TAG':experimentation_docker_image_tag,\n", + "# 'DOCKER_CONTAINER_MOUNT_POINT':docker_container_mount_point,\n", + "# 'DOCKER_LOGIN':docker_login,\n", + "# 'DOCKER_PWD':docker_pwd,\n", + "# 'ACR_NAME':acr_name\n", + "# }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### 2.2. Save sensitive info\n", + "An empty __sensitive_info__ variable will be ingored. \n", + "A non-empty __sensitive_info__ variable will overwrite info in an existing .env file." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'./../not_shared/general.env'" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "%load_ext dotenv\n", + "dotenv_file_path = os.path.join(*(prj_consts.DOTENV_FILE_PATH)) \n", + "os.makedirs(os.path.join(*(prj_consts.DOTENV_FILE_PATH[:-1])), exist_ok=True)\n", + "pathlib.Path(dotenv_file_path).touch()\n", + "\n", + "# # show .env file path\n", + "# !pwd\n", + "dotenv_file_path\n", + "\n", + "#save your sensitive info\n", + "project_utils.set_dotenv_info(dotenv_file_path, sensitive_info)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### 2.3. 
{ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### 2.3. Use (load) saved sensitive info\n", + "This is how the sensitive info will be retrieved in other notebooks." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "%dotenv $dotenv_file_path\n", + "\n", + "subscription_id = os.getenv('SUBSCRIPTION_ID')\n", + "# # print a bit of subscription ID, to show dotenv file was found and loaded \n", + "# subscription_id[:2]\n", + "\n", + "crt_resource_group = os.getenv('RESOURCE_GROUP')\n", + "crt_workspace_name = os.getenv('WORKSPACE_NAME')\n", + "crt_workspace_region = os.getenv('WORKSPACE_REGION') " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### 2.4. Access your workspace\n", + "\n", + "* In the AML SDK we can get a ws in two ways: \n", + " - via Workspace(subscription_id = ...) \n", + " - via Workspace.from_config(path=some_file_path). \n", + " \n", + "For demo purposes, both ways are shown in this notebook.\n", + "\n", + "* At the first notebook run:\n", + " - the AML workspace ws is typically not found, so a new ws object is created and persisted on disk.\n", + " - If the ws has been created in other ways (e.g. via the Azure portal), it may be persisted on disk by calling ws1.write_config(...)." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "workspace_config_dir = os.path.join(*(prj_consts.AML_WORKSPACE_CONFIG_DIR))\n", + "workspace_config_file = prj_consts.AML_WORKSPACE_CONFIG_FILE_NAME\n", + "\n", + "# # print debug info if needed \n", + "# workspace_config_dir \n", + "# ls_l(os.path.join(os.getcwd(), os.path.join(*([workspace_config_dir]))))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "###### Login into Azure may be required here\n", + "[Back](#user_input_requiring_steps) to summary of user input requiring steps." + ] + },
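For reference, the workspace config persisted by write_config() and read back by from_config() is a small JSON document; a sketch of its shape with placeholder values (the real file is written under an .azureml subdirectory):

```
# Shape of the persisted AML workspace config file (placeholder values, for illustration only)
import json
print(json.dumps({"subscription_id": "<subscription-id>",
                  "resource_group": "<resource-group-name>",
                  "workspace_name": "<workspace-name>"}, indent=4))
```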
{ + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING - Warning: Falling back to use azure cli login credentials.\n", + "If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.\n", + "Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Workspace configuration loading succeeded. \n" + ] + } + ], + "source": [ + "try:\n", + " ws1 = Workspace(\n", + " subscription_id = subscription_id, \n", + " resource_group = crt_resource_group, \n", + " workspace_name = crt_workspace_name,\n", + " auth=project_utils.get_auth(dotenv_file_path))\n", + " print(\"Workspace configuration loading succeeded. \")\n", + "# ws1.write_config(path=os.path.join(os.getcwd(), os.path.join(*([workspace_config_dir]))),\n", + "# file_name=workspace_config_file)\n", + " del ws1 # ws will be (re)created later using from_config() function\n", + "except Exception as e:\n", + " print('Exception msg: {}'.format(str(e)))\n", + " print(\"Workspace not accessible. Will create a new workspace below\")\n", + " \n", + " workspace_region = crt_workspace_region\n", + "\n", + " # Create the workspace using the specified parameters\n", + " ws2 = Workspace.create(name = crt_workspace_name,\n", + " subscription_id = subscription_id,\n", + " resource_group = crt_resource_group, \n", + " location = workspace_region,\n", + " create_resource_group = True,\n", + " exist_ok = False)\n", + " ws2.get_details()\n", + "\n", + " # persist the subscription id, resource group name, and workspace name in aml_config/config.json.\n", + " ws2.write_config(path=os.path.join(os.getcwd(), os.path.join(*([workspace_config_dir]))),\n", + " file_name=workspace_config_file)\n", + " \n", + " # Delete ws2 and use ws = Workspace.from_config() as shown below to recover the ws, rather than rely on what we get from one-time creation\n", + " del ws2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### 2.5. Demo access to created workspace\n", + "\n", + "From now on, even in other notebooks, the provisioned AML workspace will be accessible using Workspace.from_config() as shown below:" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "# path arg is:\n", + "# - a file path which explicitly lists the aml_config subdir for function from_config() \n", + "# - a dir path with a silently added .azureml subdir for function write_config(). \n", + "ws = Workspace.from_config(path=os.path.join(os.getcwd(), \n", + " os.path.join(*([workspace_config_dir, '.azureml', workspace_config_file]))))\n", + "# # print debug info if needed\n", + "# print(ws.name, ws.resource_group, ws.location, ws.subscription_id[0], sep = '\\n')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### 2.6. Create compute cluster used in the following notebooks" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'gpuclstfwi02'" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "gpu_cluster_name = os.getenv('GPU_CLUSTER_NAME')\n", + "gpu_cluster_name" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found existing gpu cluster\n" + ] + } + ], + "source": [ + "max_nodes_value = 3\n", + "\n", + "try:\n", + " gpu_cluster = ComputeTarget(workspace=ws, name=gpu_cluster_name)\n", + " print(\"Found existing gpu cluster\")\n", + "except ComputeTargetException:\n", + " print(\"Could not find gpu cluster, please create one\")\n", + " \n", + "# # Specify the configuration for the new cluster, add admin_user_ssh_key='ssh-rsa ... ghiordan@microsoft.com' if needed\n", + "# compute_config = AmlCompute.provisioning_configuration(vm_size=\"Standard_NC12\",\n", + "# min_nodes=0,\n", + "# max_nodes=max_nodes_value,\n", + "# admin_username=os.getenv('GPU_CLUSTER_ADMIN_USER_NAME'), \n", + "# admin_user_password=os.getenv('GPU_CLUSTER_ADMIN_USER_PASSWORD'))\n", + "# # Create the cluster with the specified name and configuration\n", + "# gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, compute_config)\n", + "\n", + "# # Wait for the cluster to complete, show the output log\n", + "# gpu_cluster.wait_for_completion(show_output=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### 2.7. 
Create an [ACR](https://docs.microsoft.com/en-us/azure/container-registry/) if you have not done so using the [portal](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-get-started-portal) \n", + " - Follow the 4 ACR steps described below. \n", + " - Uncomment cells' lines as needed to login and see commands responses while you set the right subscription and then create the ACR. \n", + " - You need [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) to run the commands below. \n", + "\n", + "\n", + "##### ACR Step 1. Select ACR subscription (az cli login into Azure may be required here)\n", + "[Back](#user_input_requiring_steps) to summary of user input requiring steps." + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "azure-cli 2.0.58 *\r\n", + "\r\n", + "acr 2.2.0 *\r\n", + "acs 2.3.17 *\r\n", + "advisor 2.0.0 *\r\n", + "ams 0.4.1 *\r\n", + "appservice 0.2.13 *\r\n", + "backup 1.2.1 *\r\n", + "batch 3.4.1 *\r\n", + "batchai 0.4.7 *\r\n", + "billing 0.2.0 *\r\n", + "botservice 0.1.6 *\r\n", + "cdn 0.2.0 *\r\n", + "cloud 2.1.0 *\r\n", + "cognitiveservices 0.2.4 *\r\n", + "command-modules-nspkg 2.0.2 *\r\n", + "configure 2.0.20 *\r\n", + "consumption 0.4.2 *\r\n", + "container 0.3.13 *\r\n", + "core 2.0.58 *\r\n", + "cosmosdb 0.2.7 *\r\n", + "dla 0.2.4 *\r\n", + "dls 0.1.8 *\r\n", + "dms 0.1.2 *\r\n", + "eventgrid 0.2.1 *\r\n", + "eventhubs 0.3.3 *\r\n", + "extension 0.2.3 *\r\n", + "feedback 2.1.4 *\r\n", + "find 0.2.13 *\r\n", + "hdinsight 0.3.0 *\r\n", + "interactive 0.4.1 *\r\n", + "iot 0.3.6 *\r\n", + "iotcentral 0.1.6 *\r\n", + "keyvault 2.2.11 *\r\n", + "kusto 0.1.0 *\r\n", + "lab 0.1.5 *\r\n", + "maps 0.3.3 *\r\n", + "monitor 0.2.10 *\r\n", + "network 2.3.2 *\r\n", + "nspkg 3.0.3 *\r\n", + "policyinsights 0.1.1 *\r\n", + "profile 2.1.3 *\r\n", + "rdbms 0.3.7 *\r\n", + "redis 0.4.0 *\r\n", + "relay 0.1.3 *\r\n", + "reservations 0.4.1 *\r\n", + "resource 2.1.10 *\r\n", + "role 2.4.0 *\r\n", + "search 0.1.1 *\r\n", + "security 0.1.0 *\r\n", + "servicebus 0.3.3 *\r\n", + "servicefabric 0.1.12 *\r\n", + "signalr 1.0.0 *\r\n", + "sql 2.1.9 *\r\n", + "sqlvm 0.1.0 *\r\n", + "storage 2.3.1 *\r\n", + "telemetry 1.0.1 *\r\n", + "vm 2.2.15 *\r\n", + "\r\n", + "Extensions:\r\n", + "azure-ml-admin-cli 0.0.1\r\n", + "azure-cli-ml Unknown\r\n", + "\r\n", + "Python location '/opt/az/bin/python3'\r\n", + "Extensions directory '/opt/az/extensions'\r\n", + "\r\n", + "Python (Linux) 3.6.5 (default, Feb 12 2019, 02:10:43) \r\n", + "[GCC 5.4.0 20160609]\r\n", + "\r\n", + "Legal docs and information: aka.ms/AzureCliLegal\r\n", + "\r\n", + "\r\n", + "\u001b[33mYou have 57 updates available. Consider updating your CLI installation.\u001b[0m\r\n" + ] + } + ], + "source": [ + "!az --version\n", + "if create_ACR_FLAG:\n", + " !az login\n", + " response01 = ! az account list --all --refresh -o table\n", + " response02 = ! az account set --subscription $subscription_id\n", + " response03 = ! az account list -o table\n", + " response04 = ! $cli_command\n", + "\n", + " response01\n", + " response02\n", + " response03\n", + " response04" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### ACR Step 2. 
Create the ACR" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'az acr create --resource-group ghiordanfwirsg01 --name fwi01acr --sku Basic'" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "[' \"loginServer\": \"fwi01acr.azurecr.io\",',\n", + " ' \"name\": \"fwi01acr\",',\n", + " ' \"networkRuleSet\": null,',\n", + " ' \"provisioningState\": \"Succeeded\",',\n", + " ' \"resourceGroup\": \"ghiordanfwirsg01\",',\n", + " ' \"sku\": {',\n", + " ' \"name\": \"Basic\",',\n", + " ' \"tier\": \"Basic\"',\n", + " ' },',\n", + " ' \"status\": null,',\n", + " ' \"storageAccount\": null,',\n", + " ' \"tags\": {},',\n", + " ' \"type\": \"Microsoft.ContainerRegistry/registries\"',\n", + " '}']" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "%dotenv $dotenv_file_path\n", + "acr_name = os.getenv('ACR_NAME')\n", + "\n", + "cli_command='az acr create --resource-group '+ crt_resource_group +' --name ' + acr_name + ' --sku Basic'\n", + "cli_command\n", + "\n", + "response = !$cli_command\n", + "response[-14:]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### ACR Step 3. Also enable password and login via __ [--admin-enabled true](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-authentication) __ and then use the az cli or portal to set up the credentials" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'az acr update -n fwi01acr --admin-enabled true'" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# per https://docs.microsoft.com/en-us/azure/container-registry/container-registry-authentication\n", + "cli_command='az acr update -n '+acr_name+' --admin-enabled true'\n", + "cli_command\n", + "\n", + "response = !$cli_command\n", + "# response" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### ACR Step 4. 
{ + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "# create_ACR_FLAG=False\n", + "if create_ACR_FLAG:\n", + " import subprocess\n", + " cli_command = 'az acr credential show -n '+acr_name\n", + "\n", + " acr_username = subprocess.Popen(cli_command+' --query username',shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE).\\\n", + " communicate()[0].decode(\"utf-8\").split()[0].strip('\\\"')\n", + "\n", + " acr_password = subprocess.Popen(cli_command+' --query passwords[0].value',shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE).\\\n", + " communicate()[0].decode(\"utf-8\").split()[0].strip('\\\"')\n", + "\n", + " response = dotenv.set_key(dotenv_file_path, 'ACR_PASSWORD', acr_password)\n", + " response = dotenv.set_key(dotenv_file_path, 'ACR_USERNAME', acr_username)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "%reload_ext dotenv\n", + "%dotenv -o $dotenv_file_path\n", + "\n", + "# print acr password and login info saved in dotenv file\n", + "if create_ACR_FLAG:\n", + " os.getenv('ACR_PASSWORD')\n", + " os.getenv('ACR_USERNAME')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "print('Finished running 000_Setup_GeophysicsTutorial_FWI_Azure_devito!')" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python [conda env:fwi_dev_conda_environment] *", + "language": "python", + "name": "conda-env-fwi_dev_conda_environment-py" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/contrib/fwi/azureml_devito/notebooks/010_CreateExperimentationDockerImage_GeophysicsTutorial_FWI_Azure_devito.ipynb b/contrib/fwi/azureml_devito/notebooks/010_CreateExperimentationDockerImage_GeophysicsTutorial_FWI_Azure_devito.ipynb new file mode 100755 index 00000000..aac30038 --- /dev/null +++ b/contrib/fwi/azureml_devito/notebooks/010_CreateExperimentationDockerImage_GeophysicsTutorial_FWI_Azure_devito.ipynb @@ -0,0 +1,1060 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Copyright (c) Microsoft Corporation. \n", + "Licensed under the MIT License.\n", + "\n", + "# FWI in Azure project\n", + "\n", + "## Create Experimentation Docker image\n", + "\n", + "FWI demo based on: \n", + "This project ports devito (https://github.com/opesci/devito) into Azure and runs tutorial notebooks at:\n", + "https://nbviewer.jupyter.org/github/opesci/devito/blob/master/examples/seismic/tutorials/\n", + "\n", + "\n", + "\n", + "In this notebook we create a custom docker image that will be used to run the devito demo notebooks in AzureML. \n", + "\n", + " - We transparently create a docker file and a conda environment .yml file, build the docker image, and push it to dockerhub. Azure ACR could also be used for storing docker images. \n", + " - The conda environment .yml file lists conda and pip installs, and separates all python dependencies from the docker installs. \n", + " - The dockerfile is generic. 
The only AzureML dependency is the azureml-sdk pip-installable package in the conda environment .yml file\n", + " - The created docker image will be run in the following notebook in a container on the local AzureVM or on a remote AzureML compute cluster. This AzureML pattern decouples experimentation (or training) job definition (experimentation script, data location, dependencies and docker image), happening on the control plane machine that runs this notebook, from the elastically allocated and Azure-managed VM/cluster that does the actual training/experimentation computation.\n", + " \n", + "\n", + "User input requiring steps:\n", + " - [Fill in and save docker image name settings, if needed. ](#docker_image_settings)\n", + " - [Update DOCKER_CONTAINER_MOUNT_POINT to match your local path](#docker_image_settings)\n", + " - [Set docker build and test flags](#docker_build_test_settings) \n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# Allow multiple displays per cell\n", + "from IPython.core.interactiveshell import InteractiveShell\n", + "InteractiveShell.ast_node_interactivity = \"all\" " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import sys, os\n", + "import shutil\n", + "import urllib\n", + "\n", + "import platform\n", + "import math\n", + "import docker" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Linux-4.15.0-1063-azure-x86_64-with-debian-stretch-sid'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'/datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "platform.platform()\n", + "os.getcwd()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "#### Setup docker image build and test process. \n", + " - devito tests take about 15 minutes (981.41 seconds). When running this notebook for the first time, set:\n", + " > docker_build_no_cache = '--no-cache' \n", + " > docker_test_run_devito_tests = True\n", + " \n", + "[Back](#user_input_requiring_steps) to summary of user input requiring steps." + ] + },
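Before setting the build flags below, an optional sanity check (not part of the original flow) that the local docker daemon is reachable from this kernel:

```
# docker.from_env() raises if the daemon socket is unavailable
import docker

client = docker.from_env()
print(client.version()["Version"])  # docker daemon version string
```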
{ + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "docker_build_no_cache = '' # '--no-cache' # or '' #\n", + "docker_test_run_devito_tests = True # True # False" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Import utility functions" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[None]" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def add_path_to_sys_path(path_to_append):\n", + " if not (any(path_to_append in paths for paths in sys.path)):\n", + " sys.path.append(path_to_append)\n", + " \n", + "auxiliary_files_dir = os.path.join(*(['.', 'src']))\n", + "paths_to_append = [os.path.join(os.getcwd(), auxiliary_files_dir)]\n", + "[add_path_to_sys_path(crt_path) for crt_path in paths_to_append]\n", + "\n", + "import project_utils\n", + "prj_consts = project_utils.project_consts()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Create experimentation docker file" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'./../not_shared/general.env'" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "dotenv_file_path = os.path.join(*(prj_consts.DOTENV_FILE_PATH))\n", + "dotenv_file_path" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks\r\n" + ] + } + ], + "source": [ + "!pwd" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "# azureml_sdk_version set here must match the azureml sdk version pinned in the conda env file written to conda_common_file_path below\n", + "azureml_sdk_version = '1.0.76' " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "##### Input here docker image settings \n", + "In the cell below we use [dotenv](https://github.com/theskumar/python-dotenv) to overwrite docker image properties already saved in dotenv_file_path. Change as needed, e.g. update the azureml_sdk version if using a different version.\n", + "\n", + "[Back](#user_input_requiring_steps) to summary of user input requiring steps." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(True, 'EXPERIMENTATION_DOCKER_IMAGE_TAG', 'sdk.v1.0.76')" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "(True,\n", + " 'DOCKER_CONTAINER_MOUNT_POINT',\n", + " '/datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks')" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# SDK changes often, so we'll keep its version transparent \n", + "import dotenv\n", + "\n", + "# EXPERIMENTATION_IMAGE_VERSION should:\n", + "# - match the sdk version in the fwi01_conda_env01 environment in the conda_env_fwi01_azureml_sdk.v1.0.XX.yml file below\n", + "# - match the conda env yml file name, e.g. 
conda_env_fwi01_azureml_sdk.v1.0.xx.yml referenced in \n", + "# Dockerfile_fwi01_azureml_sdk.v1.0.xx\n", + "# dotenv.set_key(dotenv_file_path, 'EXPERIMENTATION_DOCKER_IMAGE_NAME', 'fwi01_azureml')\n", + "dotenv.set_key(dotenv_file_path, 'EXPERIMENTATION_DOCKER_IMAGE_TAG', ('sdk.v'+azureml_sdk_version))\n", + "\n", + "\n", + "docker_container_mount_point = os.getcwd()\n", + "# or something like \"/datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks'\n", + "dotenv.set_key(dotenv_file_path, 'DOCKER_CONTAINER_MOUNT_POINT', docker_container_mount_point)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'fwi01acr.azurecr.io/fwi01_azureml:sdk.v1.0.76'" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'conda_env_fwi01_azureml_sdk.v1.0.76.yml'" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'/datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks/./../temp/docker_build/conda_env_fwi01_azureml_sdk.v1.0.76.yml'" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'/datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks/./../temp/docker_build/conda_env_fwi01_azureml.yml'" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'/datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks/./../temp/docker_build'" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'/datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks/./../temp/docker_build/Dockerfile_fwi01_azureml_sdk.v1.0.76'" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "%reload_ext dotenv\n", + "%dotenv $dotenv_file_path\n", + "\n", + "docker_file_location = os.path.join(*(prj_consts.AML_EXPERIMENT_DIR + ['docker_build']))\n", + "\n", + "docker_file_name = 'Dockerfile'+ '_' + os.getenv('EXPERIMENTATION_DOCKER_IMAGE_NAME')\n", + "\n", + "conda_dependency_file_name = 'conda_env'+ '_' + os.getenv('EXPERIMENTATION_DOCKER_IMAGE_NAME')\n", + "conda_dependency_common_file_name = conda_dependency_file_name\n", + "\n", + "devito_conda_dependency_file_name = 'devito_conda_env'+'.yml'\n", + "\n", + "docker_repo_name = os.getenv('ACR_NAME')+'.azurecr.io' # or os.getenv('DOCKER_LOGIN')\n", + "docker_image_name = docker_repo_name + '/' + os.getenv('EXPERIMENTATION_DOCKER_IMAGE_NAME')\n", + "\n", + "image_version = os.getenv('EXPERIMENTATION_DOCKER_IMAGE_TAG')\n", + "if image_version!=\"\":\n", + " docker_file_name = docker_file_name +'_'+ image_version\n", + " conda_dependency_file_name = conda_dependency_file_name+'_'+ image_version\n", + " docker_image_name = docker_image_name +':'+ image_version\n", + "conda_dependency_file_name=conda_dependency_file_name+'.yml'\n", + "conda_dependency_common_file_name = conda_dependency_common_file_name+'.yml'\n", + "\n", + "docker_file_dir = os.path.join(*([os.getcwd(), docker_file_location]))\n", + "os.makedirs(docker_file_dir, exist_ok=True)\n", + "docker_file_path = os.path.join(*([docker_file_dir]+[docker_file_name]))\n", + "conda_file_path = os.path.join(*([docker_file_dir]+[conda_dependency_file_name]))\n", + "conda_common_file_path = 
os.path.join(*([docker_file_dir]+[conda_dependency_common_file_name]))\n", + "\n", + "docker_image_name\n", + "\n", + "conda_dependency_file_name\n", + "conda_file_path\n", + "conda_common_file_path\n", + "\n", + "docker_file_dir\n", + "docker_file_path" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Writing /datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks/./../temp/docker_build/conda_env_fwi01_azureml.yml\n" + ] + } + ], + "source": [ + "%%writefile $conda_common_file_path\n", + "name: fwi01_conda_env01\n", + " \n", + "#https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.13.1-cp37-cp37m-linux_x86_64.whl \n", + "# https://github.com/dask/dask-tutorial\n", + "\n", + "channels:\n", + " - anaconda\n", + " - conda-forge\n", + "dependencies:\n", + " - python=3.6 # 3.6 req by tf, not 3.7.2 \n", + " - dask\n", + " - distributed\n", + " - h5py\n", + " - matplotlib\n", + " - nb_conda\n", + " - notebook \n", + " - numpy \n", + " - pandas\n", + " - pip\n", + " - py-cpuinfo # all required by devito or dask-tutorial\n", + " - pytables\n", + " - python-graphviz\n", + " - requests\n", + " - pillow\n", + " - scipy\n", + " - snakeviz\n", + " - scikit-image\n", + " - toolz\n", + " - pip:\n", + " - anytree # required by devito\n", + " - azureml-sdk[notebooks,automl]==1.0.76\n", + " - codepy # required by devito\n", + " - papermill[azure]\n", + " - pyrevolve # required by devito" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Writing /datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks/./../temp/docker_build/Dockerfile_fwi01_azureml_sdk.v1.0.76\n" + ] + } + ], + "source": [ + "%%writefile $docker_file_path \n", + "\n", + "FROM continuumio/miniconda3:4.7.10 \n", + "MAINTAINER George Iordanescu \n", + "\n", + "RUN apt-get update --fix-missing && apt-get install -y --no-install-recommends \\\n", + " gcc g++ \\\n", + " wget bzip2 \\\n", + " curl \\\n", + " git make \\\n", + " mpich \\ \n", + " libmpich-dev && \\\n", + " apt-get clean && \\\n", + " rm -rf /var/lib/apt/lists/*\n", + "\n", + "ENV CONDA_ENV_FILE_NAME conda_env_fwi01_azureml.yml\n", + "ADD $CONDA_ENV_FILE_NAME /tmp/$CONDA_ENV_FILE_NAME\n", + "ENV CONDA_DIR /opt/conda\n", + "ENV CONDA_ENV_NAME fwi01_conda_env\n", + "\n", + "RUN git clone https://github.com/opesci/devito.git && \\\n", + " cd devito && \\\n", + " /opt/conda/bin/conda env create -q --name $CONDA_ENV_NAME -f environment.yml && \\\n", + " pip install -e . 
\n", + " \n", + "ENV CONDA_AUTO_UPDATE_CONDA=false\n", + "ENV CONDA_DEFAULT_ENV=$CONDA_ENV_NAME\n", + "ENV CONDA_PREFIX=$CONDA_DIR/envs/$CONDA_DEFAULT_ENV\n", + "ENV PATH=$CONDA_PREFIX/bin:/opt/conda/bin:$PATH \n", + "\n", + "RUN /opt/conda/bin/conda env update --name $CONDA_ENV_NAME -f /tmp/$CONDA_ENV_FILE_NAME && \\\n", + " /opt/conda/bin/conda clean --yes --all\n", + "\n", + "ENV PYTHONPATH=$PYTHONPATH:devito/app\n", + "\n", + "# WORKDIR /devito \n", + " \n", + "CMD /bin/bash" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'/datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks/./../temp/docker_build/conda_env_fwi01_azureml_sdk.v1.0.76.yml'" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "total 12\r\n", + "-rw-rw-r-- 1 loginvm022 loginvm022 725 Dec 6 15:26 conda_env_fwi01_azureml_sdk.v1.0.76.yml\r\n", + "-rw-rw-r-- 1 loginvm022 loginvm022 725 Dec 6 15:26 conda_env_fwi01_azureml.yml\r\n", + "-rw-rw-r-- 1 loginvm022 loginvm022 1073 Dec 6 15:26 Dockerfile_fwi01_azureml_sdk.v1.0.76\r\n" + ] + } + ], + "source": [ + "shutil.copyfile(conda_common_file_path, conda_file_path)\n", + "\n", + "! ls -l $docker_file_dir" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'docker build -t fwi01acr.azurecr.io/fwi01_azureml:sdk.v1.0.76 -f /datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks/./../temp/docker_build/Dockerfile_fwi01_azureml_sdk.v1.0.76 /datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks/./../temp/docker_build '" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "['Sending build context to Docker daemon 6.144kB',\n", + " '',\n", + " 'Step 1/15 : FROM continuumio/miniconda3:4.7.10',\n", + " '4.7.10: Pulling from continuumio/miniconda3',\n", + " '1ab2bdfe9778: Pulling fs layer']" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "[' ---> Running in 00c2824f0cd3',\n", + " 'Removing intermediate container 00c2824f0cd3',\n", + " ' ---> 48fb03897096',\n", + " 'Successfully built 48fb03897096',\n", + " 'Successfully tagged fwi01acr.azurecr.io/fwi01_azureml:sdk.v1.0.76']" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cli_command='docker build -t '+ docker_image_name + \\\n", + "' -f ' + docker_file_path + \\\n", + "' ' + docker_file_dir + ' ' +\\\n", + "docker_build_no_cache #'' #' --no-cache'\n", + "\n", + "\n", + "cli_command\n", + "docker_build_response = ! 
$cli_command\n", + "\n", + "docker_build_response[0:5] \n", + "docker_build_response[-5:] " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Docker containers can be run using python docker sdk" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'fwi01acr.azurecr.io/fwi01_azureml:sdk.v1.0.76'" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'bash -c \"pwd;python -c \\'import azureml.core;print(azureml.core.VERSION)\\'\"'" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "b'/\\n1.0.76\\n'" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "docker_image_name\n", + "\n", + "sh_command='bash -c \"pwd;python -c \\'import azureml.core;print(azureml.core.VERSION)\\'\"'\n", + "sh_command\n", + "client = docker.from_env()\n", + "client.containers.run(docker_image_name, \n", + " remove=True,\n", + " volumes={os.getenv('DOCKER_CONTAINER_MOUNT_POINT'): {'bind': '/workspace', 'mode': 'rw'}},\n", + " working_dir='/',\n", + " command=sh_command)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Docker containers can also be run in cli \n", + "\n", + "Here we also create a log file to capture commands execution in container. If flag docker_test_run_devito_tests is True, we run \n", + "and capture test commands output. Tests take abou 15 minutes to run. If flag docker_test_run_devito_tests is False, we show the results of a previous session. " + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'./fwi01_azureml_buildexperimentationdockerimage.log'" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "fwi01_log_file = os.path.join(*(['.', 'fwi01_azureml_buildexperimentationdockerimage.log']))\n", + "fwi01_log_file" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Create command for running devito tests, capture output in a log file, save log file outside container" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "content of devito tests log file before testing:\n", + "Before running e13n container... \r\n" + ] + }, + { + "data": { + "text/plain": [ + "' python -m pytest tests/ > ./fwi01_azureml_buildexperimentationdockerimage.log 2>&1; mv ./fwi01_azureml_buildexperimentationdockerimage.log /workspace/'" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "if docker_test_run_devito_tests:\n", + " run_devito_tests_command = ' python -m pytest tests/ ' + \\\n", + "'> ' + fwi01_log_file +' 2>&1; ' + \\\n", + "' mv ' + fwi01_log_file + ' /workspace/' \n", + " \n", + " with open(os.path.join(*(['.', 'fwi01_azureml_buildexperimentationdockerimage.log'])), \"w\") as crt_log_file:\n", + " print('Before running e13n container... 
', file=crt_log_file)\n", + " print('\\ncontent of devito tests log file before testing:')\n", + " !cat $fwi01_log_file\n", + "else:\n", + " run_devito_tests_command = '' \n", + "\n", + "# run_devito_tests_command = 'ls -l > ./fwi01_azureml_buildexperimentationdockerimage.log 2>&1; mv ./fwi01_azureml_buildexperimentationdockerimage.log /workspace/'\n", + "run_devito_tests_command" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'docker run -it --rm --name fwi01_azureml_container -v /datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks:/workspace:rw fwi01acr.azurecr.io/fwi01_azureml:sdk.v1.0.76 /bin/bash -c \"conda env list ; ls -l /devito/tests; python -c \\'import azureml.core;print(azureml.core.VERSION)\\'; cd /devito; python -m pytest tests/ > ./fwi01_azureml_buildexperimentationdockerimage.log 2>&1; mv ./fwi01_azureml_buildexperimentationdockerimage.log /workspace/ \"'" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# conda environments:\n", + "#\n", + "base /opt/conda\n", + "fwi01_conda_env * /opt/conda/envs/fwi01_conda_env\n", + "\n", + "total 560\n", + "-rw-r--r-- 1 root root 11521 Dec 6 15:26 conftest.py\n", + "-rw-r--r-- 1 root root 6006 Dec 6 15:26 test_adjoint.py\n", + "-rw-r--r-- 1 root root 14586 Dec 6 15:26 test_autotuner.py\n", + "-rw-r--r-- 1 root root 7538 Dec 6 15:26 test_builtins.py\n", + "-rw-r--r-- 1 root root 24415 Dec 6 15:26 test_caching.py\n", + "-rw-r--r-- 1 root root 9721 Dec 6 15:26 test_checkpointing.py\n", + "-rw-r--r-- 1 root root 1095 Dec 6 15:26 test_constant.py\n", + "-rw-r--r-- 1 root root 55954 Dec 6 15:26 test_data.py\n", + "-rw-r--r-- 1 root root 481 Dec 6 15:26 test_dependency_bugs.py\n", + "-rw-r--r-- 1 root root 16331 Dec 6 15:26 test_derivatives.py\n", + "-rw-r--r-- 1 root root 1473 Dec 6 15:26 test_differentiable.py\n", + "-rw-r--r-- 1 root root 30846 Dec 6 15:26 test_dimension.py\n", + "-rw-r--r-- 1 root root 24838 Dec 6 15:26 test_dle.py\n", + "-rw-r--r-- 1 root root 1169 Dec 6 15:26 test_docstrings.py\n", + "-rw-r--r-- 1 root root 32134 Dec 6 15:26 test_dse.py\n", + "-rw-r--r-- 1 root root 8205 Dec 6 15:26 test_gradient.py\n", + "-rw-r--r-- 1 root root 15227 Dec 6 15:26 test_interpolation.py\n", + "-rw-r--r-- 1 root root 31816 Dec 6 15:26 test_ir.py\n", + "-rw-r--r-- 1 root root 63169 Dec 6 15:26 test_mpi.py\n", + "-rw-r--r-- 1 root root 67053 Dec 6 15:26 test_operator.py\n", + "-rw-r--r-- 1 root root 14875 Dec 6 15:26 test_ops.py\n", + "-rw-r--r-- 1 root root 12228 Dec 6 15:26 test_pickle.py\n", + "-rw-r--r-- 1 root root 1809 Dec 6 15:26 test_resample.py\n", + "-rw-r--r-- 1 root root 1754 Dec 6 15:26 test_save.py\n", + "-rw-r--r-- 1 root root 2115 Dec 6 15:26 test_staggered_utils.py\n", + "-rw-r--r-- 1 root root 5711 Dec 6 15:26 test_subdomains.py\n", + "-rw-r--r-- 1 root root 3320 Dec 6 15:26 test_symbolic_coefficients.py\n", + "-rw-r--r-- 1 root root 7277 Dec 6 15:26 test_tensors.py\n", + "-rw-r--r-- 1 root root 3186 Dec 6 15:26 test_timestepping.py\n", + "-rw-r--r-- 1 root root 603 Dec 6 15:26 test_tools.py\n", + "-rw-r--r-- 1 root root 3296 Dec 6 15:26 test_tti.py\n", + "-rw-r--r-- 1 root root 8835 Dec 6 15:26 test_visitors.py\n", + "-rw-r--r-- 1 root root 21802 Dec 6 15:26 test_yask.py\n", + "1.0.76\n", + "\n", + "content of devito tests log file after testing:\n", + "============================= test session starts 
==============================\n", + "platform linux -- Python 3.6.9, pytest-5.3.1, py-1.8.0, pluggy-0.13.1\n", + "rootdir: /devito, inifile: setup.cfg\n", + "plugins: nbval-0.9.3, cov-2.8.1\n", + "collected 1056 items / 2 skipped / 1054 selected\n", + "\n", + "tests/test_adjoint.py .......................... [ 2%]\n", + "tests/test_autotuner.py ..........s..... [ 3%]\n", + "tests/test_builtins.py ....s...............s..s [ 6%]\n", + "tests/test_caching.py .................................................. [ 10%]\n", + " [ 10%]\n", + "tests/test_checkpointing.py ....... [ 11%]\n", + "tests/test_constant.py . [ 11%]\n", + "tests/test_data.py ..........................ssssssssssssssssss.ss.. [ 16%]\n", + "tests/test_dependency_bugs.py . [ 16%]\n", + "tests/test_derivatives.py .............................................. [ 20%]\n", + "........................................................................ [ 27%]\n", + "........................................................................ [ 34%]\n", + "...... [ 35%]\n", + "tests/test_differentiable.py .. [ 35%]\n", + "tests/test_dimension.py ............................... [ 38%]\n", + "tests/test_dle.py ...................................................... [ 43%]\n", + "........................................... [ 47%]\n", + "tests/test_docstrings.py ................ [ 48%]\n", + "tests/test_dse.py ......x............................................... [ 53%]\n", + "................x..........s.... [ 57%]\n", + "tests/test_gradient.py .... [ 57%]\n", + "tests/test_interpolation.py ........................ [ 59%]\n", + "tests/test_ir.py ....................................................... [ 64%]\n", + "................ [ 66%]\n", + "tests/test_mpi.py ssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 71%]\n", + "sss [ 71%]\n", + "tests/test_operator.py ................................................. [ 76%]\n", + "..............................................s......................... [ 83%]\n", + ".................................. [ 86%]\n", + "tests/test_pickle.py .................ss. [ 88%]\n", + "tests/test_resample.py . [ 88%]\n", + "tests/test_save.py .. [ 88%]\n", + "tests/test_staggered_utils.py ......... [ 89%]\n", + "tests/test_subdomains.py ... [ 89%]\n", + "tests/test_symbolic_coefficients.py .....F [ 90%]\n", + "tests/test_tensors.py .................................................. [ 95%]\n", + "........................... [ 97%]\n", + "tests/test_timestepping.py ....... [ 98%]\n", + "tests/test_tools.py ..... [ 98%]\n", + "tests/test_tti.py .... [ 99%]\n", + "tests/test_visitors.py ......... 
[100%]\n", + "\n", + "=================================== FAILURES ===================================\n", + "______________________ TestSC.test_function_coefficients _______________________\n", + "\n", + "self = \n", + "\n", + " def test_function_coefficients(self):\n", + " \"\"\"Test that custom function coefficients return the expected result\"\"\"\n", + " so = 2\n", + " grid = Grid(shape=(4, 4))\n", + " f0 = TimeFunction(name='f0', grid=grid, space_order=so, coefficients='symbolic')\n", + " f1 = TimeFunction(name='f1', grid=grid, space_order=so)\n", + " x, y = grid.dimensions\n", + " \n", + " s = Dimension(name='s')\n", + " ncoeffs = so+1\n", + " \n", + " wshape = list(grid.shape)\n", + " wshape.append(ncoeffs)\n", + " wshape = as_tuple(wshape)\n", + " \n", + " wdims = list(grid.dimensions)\n", + " wdims.append(s)\n", + " wdims = as_tuple(wdims)\n", + " \n", + " w = Function(name='w', dimensions=wdims, shape=wshape)\n", + " w.data[:, :, 0] = 0.0\n", + " w.data[:, :, 1] = -1.0/grid.spacing[0]\n", + " w.data[:, :, 2] = 1.0/grid.spacing[0]\n", + " \n", + " f_x_coeffs = Coefficient(1, f0, x, w)\n", + " \n", + " subs = Substitutions(f_x_coeffs)\n", + " \n", + " eq0 = Eq(f0.dt + f0.dx, 1, coefficients=subs)\n", + " eq1 = Eq(f1.dt + f1.dx, 1)\n", + " \n", + " stencil0 = solve(eq0.evaluate, f0.forward)\n", + " stencil1 = solve(eq1.evaluate, f1.forward)\n", + " \n", + " op0 = Operator(Eq(f0.forward, stencil0))\n", + " op1 = Operator(Eq(f1.forward, stencil1))\n", + " \n", + " op0(time_m=0, time_M=5, dt=1.0)\n", + " op1(time_m=0, time_M=5, dt=1.0)\n", + " \n", + "> assert np.all(np.isclose(f0.data[:] - f1.data[:], 0.0, atol=1e-5, rtol=0))\n", + "E assert Data(False)\n", + "E + where Data(False) = (Data([[[False, False, False, False],\\n [False, False, False, False],\\n [ True, True, True, True],\\n ...alse],\\n [False, False, False, False],\\n [False, False, False, False],\\n [ True, True, True, True]]]))\n", + "E + where = np.all\n", + "E + and Data([[[False, False, False, False],\\n [False, False, False, False],\\n [ True, True, True, True],\\n ...alse],\\n [False, False, False, False],\\n [False, False, False, False],\\n [ True, True, True, True]]]) = ((Data([[[-1452., -1452., -1452., -1452.],\\n [ 3327., 3327., 3327., 3327.],\\n [-3414., -3414., -3414., -341...3., 383., 383.],\\n [ -598., -598., -598., -598.],\\n [ 341., 341., 341., 341.]]], dtype=float32) - Data([[[-1451.9998 , -1451.9998 , -1451.9998 , -1451.9998 ],\\n [ 3326.9995 , 3326.9995 , 3326.9995 , 33...4 , -597.99994 , -597.99994 ],\\n [ 341. , 341. , 341. , 341. 
]]],\\n dtype=float32)), 0.0, atol=1e-05, rtol=0)\n", + "E + where = np.isclose\n", + "\n", + "tests/test_symbolic_coefficients.py:96: AssertionError\n", + "----------------------------- Captured stderr call -----------------------------\n", + "/tmp/devito-jitcache-uid0/28a0c1d4f6f5711828a8c4cd1ff27eaa7607404e.c: In function ‘Kernel’:\n", + "/tmp/devito-jitcache-uid0/28a0c1d4f6f5711828a8c4cd1ff27eaa7607404e.c:39: warning: ignoring #pragma omp simd [-Wunknown-pragmas]\n", + " #pragma omp simd aligned(f0,w:32)\n", + " \n", + "Operator `Kernel` run in 0.01 s\n", + "/tmp/devito-jitcache-uid0/0031268cb9efe9dfa4f656da51efd0d4fa4b9d00.c: In function ‘Kernel’:\n", + "/tmp/devito-jitcache-uid0/0031268cb9efe9dfa4f656da51efd0d4fa4b9d00.c:38: warning: ignoring #pragma omp simd [-Wunknown-pragmas]\n", + " #pragma omp simd aligned(f1:32)\n", + " \n", + "Operator `Kernel` run in 0.01 s\n", + "------------------------------ Captured log call -------------------------------\n", + "INFO Devito:logger.py:129 Operator `Kernel` run in 0.01 s\n", + "INFO Devito:logger.py:129 Operator `Kernel` run in 0.01 s\n", + "====== 1 failed, 968 passed, 87 skipped, 2 xfailed in 1070.16s (0:17:50) =======\n" + ] + } + ], + "source": [ + "cli_command='docker run -it --rm --name fwi01_azureml_container ' +\\\n", + "' -v '+os.getenv('DOCKER_CONTAINER_MOUNT_POINT')+':/workspace:rw ' + \\\n", + "docker_image_name + \\\n", + "' /bin/bash -c \"conda env list ; ls -l /devito/tests; ' + \\\n", + "'python -c \\'import azureml.core;print(azureml.core.VERSION)\\'; ' + \\\n", + "'cd /devito; ' + \\\n", + "run_devito_tests_command +\\\n", + "' \"'\n", + "\n", + "cli_command\n", + "! $cli_command\n", + "# # ============= 774 passed, 70 skipped, 1 xfailed in 1106.76 seconds =============\n", + "print('\\ncontent of devito tests log file after testing:')\n", + "!cat $fwi01_log_file" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "###### Use the ACR created in previous notebook or docker hub to push your image" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'az acr login --name fwi01acr'" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Login Succeeded\r\n", + "WARNING! Your password will be stored unencrypted in /home/loginvm022/.docker/config.json.\r\n", + "Configure a credential helper to remove this warning. See\r\n", + "https://docs.docker.com/engine/reference/commandline/login/#credentials-store\r\n", + "\r\n", + "\u001b[0m" + ] + } + ], + "source": [ + "# docker_pwd = os.getenv('DOCKER_PWD')\n", + "# docker_login = os.getenv('DOCKER_LOGIN')\n", + "# !docker login -u=$docker_login -p=$docker_pwd\n", + "# !docker push {docker_image_name}\n", + "\n", + "%dotenv -o $dotenv_file_path\n", + "cli_command='az acr login --name '+os.getenv('ACR_NAME')\n", + "# print cli command\n", + "cli_command\n", + "\n", + "# run cli command\n", + "cli_command = cli_command+' --username '+os.getenv('ACR_USERNAME') + ' --password ' + os.getenv('ACR_PASSWORD')\n", + "! 
$cli_command" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'docker push fwi01acr.azurecr.io/fwi01_azureml:sdk.v1.0.76'" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cli_command='docker push '+docker_image_name\n", + "cli_command" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The push refers to repository [fwi01acr.azurecr.io/fwi01_azureml]\n", + "\n", + "\u001b[1Bd6300f53: Preparing \n", + "\u001b[1B01af7f6b: Preparing \n", + "\u001b[1B41f0b573: Preparing \n", + "\u001b[1B04ca5654: Preparing \n", + "\u001b[1Bf8fc4c9a: Preparing \n", + "\u001b[1Bba47210e: Preparing \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[6B01af7f6b: Pushing 1.484GB/3.028GBA\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[2A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u0
01b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[4A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[4A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u0
01b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u0
01b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u0
01b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u0
01b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[7Bd6300f53: Pushing 3.026GB/3.028GB\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\
u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\
u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\
u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\
u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2KPushing 2.58GB/2.968GB\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b
[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[6B01af7f6b: Pushed 3.103GB/3.028GB\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[7A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2K\u001b[6A\u001b[2Ksdk.v1.0.76: digest: sha256:416dc7ce59c279822e967223790f7b8b7d99ba62bc643ca44b94551135b60b6b size: 1800\n" + ] + } + ], + "source": [ + "! 
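 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Optionally, the pushed tag can be verified by listing the repository tags in ACR (a hypothetical check, left commented out and not executed as part of this run):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# hypothetical check (commented out): list the tags of the pushed repository in ACR\n", + "# ! az acr repository show-tags --name $ACR_NAME --repository fwi01_azureml" + ] + },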
$cli_command" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Finished running 010_CreateExperimentationDockerImage_GeophysicsTutorial_FWI_Azure_devito!\n" + ] + } + ], + "source": [ + "# !jupyter nbconvert 010_CreateExperimentationDockerImage_GeophysicsTutorial_FWI_Azure_devito --to html\n", + "print('Finished running 010_CreateExperimentationDockerImage_GeophysicsTutorial_FWI_Azure_devito!')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/contrib/fwi/azureml_devito/notebooks/020_UseAzureMLEstimatorForExperimentation_GeophysicsTutorial_FWI_Azure_devito.ipynb b/contrib/fwi/azureml_devito/notebooks/020_UseAzureMLEstimatorForExperimentation_GeophysicsTutorial_FWI_Azure_devito.ipynb new file mode 100755 index 00000000..db76f4c5 --- /dev/null +++ b/contrib/fwi/azureml_devito/notebooks/020_UseAzureMLEstimatorForExperimentation_GeophysicsTutorial_FWI_Azure_devito.ipynb @@ -0,0 +1,1003 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Copyright (c) Microsoft Corporation. \n", + "Licensed under the MIT License. \n", + " \n", + " \n", + "# FWI demo based on: \n", + "This project ports devito (https://github.com/opesci/devito) into Azure and runs tutorial notebooks at:\n", + "https://nbviewer.jupyter.org/github/opesci/devito/blob/master/examples/seismic/tutorials/\n", + "\n", + "\n", + "\n", + "In this notebook we run the devito demo [notebooks](https://nbviewer.jupyter.org/github/opesci/devito/blob/master/examples/seismic/tutorials/) mentioned above by using an [AzureML estimator](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.estimator.estimator?view=azure-ml-py) with custom docker image. The docker image and associated docker file were created in previous notebook.\n", + "\n", + "\n", + "#### This notebook is used as a control plane to submit experimentation jobs running devito in Azure in two modes (see [remote run azureml python script file invoking devito](#devito_demo_mode)):\n", + " - [Mode 1](#devito_demo_mode_1):\n", + " - uses custom code (slightly modified graphing functions save images to files too) \n", + " - experimentation job is defined by the devito code that is packaged as a py file to be run on an Azure remote compute target\n", + " - experimentation job can be used to track metrics or other artifacts (images)\n", + " \n", + " - Mode 2:\n", + " - papermill is invoked via its Python API to run unedited devito demo notebooks (https://github.com/opesci/devito/tree/master/examples/seismic/tutorials) on the remote compute target and get back the results as saved notebooks that are then Available in Azure portal. 
\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# Allow multiple displays per cell\n", + "from IPython.core.interactiveshell import InteractiveShell\n", + "InteractiveShell.ast_node_interactivity = \"all\" " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import sys, os\n", + "import shutil\n", + "import urllib\n", + "import azureml.core\n", + "from azureml.core import Workspace, Experiment\n", + "from azureml.core.compute import ComputeTarget, AmlCompute\n", + "from azureml.core.compute_target import ComputeTargetException\n", + "from azureml.core.runconfig import MpiConfiguration\n", + "\n", + "\n", + "# from azureml.core.datastore import Datastore\n", + "# from azureml.data.data_reference import DataReference\n", + "# from azureml.pipeline.steps import HyperDriveStep\n", + "# from azureml.pipeline.core import Pipeline, PipelineData\n", + "# from azureml.train.dnn import TensorFlow\n", + "\n", + "from azureml.train.estimator import Estimator\n", + "from azureml.widgets import RunDetails\n", + "\n", + "import platform" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Azure ML SDK Version: 1.0.76\n" + ] + }, + { + "data": { + "text/plain": [ + "'Linux-4.15.0-1063-azure-x86_64-with-debian-stretch-sid'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'/datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "print(\"Azure ML SDK Version: \", azureml.core.VERSION)\n", + "platform.platform()\n", + "os.getcwd()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[None]" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'./../not_shared/general.env'" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def add_path_to_sys_path(path_to_append):\n", + " if not (any(path_to_append in paths for paths in sys.path)):\n", + " sys.path.append(path_to_append)\n", + " \n", + "auxiliary_files_dir = os.path.join(*(['.', 'src']))\n", + "paths_to_append = [os.path.join(os.getcwd(), auxiliary_files_dir)]\n", + "[add_path_to_sys_path(crt_path) for crt_path in paths_to_append]\n", + "\n", + "import project_utils\n", + "prj_consts = project_utils.project_consts()\n", + "\n", + "dotenv_file_path = os.path.join(*(prj_consts.DOTENV_FILE_PATH))\n", + "dotenv_file_path" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext dotenv" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'./../not_shared'" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "workspace_config_dir = os.path.join(*(prj_consts.AML_WORKSPACE_CONFIG_DIR))\n", + "workspace_config_file = prj_consts.AML_WORKSPACE_CONFIG_FILE_NAME\n", + "workspace_config_dir" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + 
"'./../temp/devito_tutorial/01_modelling.py'" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'./../temp/devito_tutorial/azureml_01_modelling.py'" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "%dotenv $dotenv_file_path\n", + "\n", + "script_folder = prj_consts.AML_EXPERIMENT_DIR + ['devito_tutorial']\n", + "\n", + "devito_training_script_file = '01_modelling.py' # hardcoded in file azureml_training_script_full_file_name below\n", + "azureml_training_script_file = 'azureml_'+devito_training_script_file\n", + "experimentName = '020_AzureMLEstimator'\n", + "\n", + "os.makedirs(os.path.join(*(script_folder)), exist_ok=True)\n", + "script_path = os.path.join(*(script_folder))\n", + "training_script_full_file_name = os.path.join(script_path, devito_training_script_file)\n", + "azureml_training_script_full_file_name = os.path.join(script_path, azureml_training_script_file)\n", + "\n", + "training_script_full_file_name\n", + "azureml_training_script_full_file_name" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + " \n", + "##### devito in Azure ML demo mode 1\n", + "Create devito demo script based on \n", + "https://nbviewer.jupyter.org/github/opesci/devito/blob/master/examples/seismic/tutorials/01_modelling.ipynb\n", + "\n", + "[Back](#devito_in_AzureML_demoing_modes) to summary of modes od demoing devito in AzureML.\n", + "\n", + "Main purpose of this script is to extend _plot_velocity()_ and _plot_shotrecord()_ devito [plotting functions](https://github.com/opesci/devito/blob/master/examples/seismic/plotting.py) to allow the mto work in batch mode, i.e. save output to a file." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting ./../temp/devito_tutorial/01_modelling.py\n" + ] + } + ], + "source": [ + "%%writefile $training_script_full_file_name\n", + "\n", + "import numpy as np\n", + "import os, argparse\n", + "\n", + "from examples.seismic import Model\n", + "from examples.seismic import TimeAxis\n", + "from examples.seismic import Receiver\n", + "from devito import TimeFunction\n", + "from devito import Eq, solve\n", + "from devito import Operator\n", + "\n", + "\n", + "# try:\n", + "import matplotlib as mpl\n", + "import matplotlib.pyplot as plt\n", + "from matplotlib import cm\n", + "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", + "\n", + "mpl.rc('font', size=16)\n", + "mpl.rc('figure', figsize=(8, 6))\n", + "# except:\n", + "# plt = None\n", + "# cm = None\n", + " \n", + "\n", + "\n", + "# \"all\" plotting utils in devito do not save to file, so we extend them here\n", + "# https://github.com/opesci/devito/blob/master/examples/seismic/plotting.py\n", + "def plot_velocity(model, source=None, receiver=None, colorbar=True, file=None):\n", + " \"\"\"\n", + " Plot a two-dimensional velocity field from a seismic `Model`\n", + " object. 
Optionally also includes point markers for sources and receivers.\n", + "\n", + " Parameters\n", + " ----------\n", + " model : Model\n", + " Object that holds the velocity model.\n", + " source : array_like or float\n", + " Coordinates of the source point.\n", + " receiver : array_like or float\n", + " Coordinates of the receiver points.\n", + " colorbar : bool\n", + " Option to plot the colorbar.\n", + " file : str\n", + " Optional file name to also save the plot to.\n", + " \"\"\"\n", + " domain_size = 1.e-3 * np.array(model.domain_size)\n", + " extent = [model.origin[0], model.origin[0] + domain_size[0],\n", + " model.origin[1] + domain_size[1], model.origin[1]]\n", + "\n", + " plot = plt.imshow(np.transpose(model.vp.data), animated=True, cmap=cm.jet,\n", + " vmin=np.min(model.vp.data), vmax=np.max(model.vp.data),\n", + " extent=extent)\n", + " plt.xlabel('X position (km)')\n", + " plt.ylabel('Depth (km)')\n", + "\n", + " # Plot receiver points, if provided\n", + " if receiver is not None:\n", + " plt.scatter(1e-3*receiver[:, 0], 1e-3*receiver[:, 1],\n", + " s=25, c='green', marker='D')\n", + "\n", + " # Plot source points, if provided\n", + " if source is not None:\n", + " plt.scatter(1e-3*source[:, 0], 1e-3*source[:, 1],\n", + " s=25, c='red', marker='o')\n", + "\n", + " # Ensure axis limits\n", + " plt.xlim(model.origin[0], model.origin[0] + domain_size[0])\n", + " plt.ylim(model.origin[1] + domain_size[1], model.origin[1])\n", + "\n", + " # Create aligned colorbar on the right\n", + " if colorbar:\n", + " ax = plt.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " cbar = plt.colorbar(plot, cax=cax)\n", + " cbar.set_label('Velocity (km/s)')\n", + " plt.show()\n", + " \n", + " if file is not None:\n", + " plt.savefig(file)\n", + " print('plotted image saved as {} file'.format(file))\n", + " \n", + " plt.clf()\n", + "\n", + "def plot_shotrecord(rec, model, t0, tn, colorbar=True, file=None):\n", + " \"\"\"\n", + " Plot a shot record (receiver values over time).\n", + "\n", + " Parameters\n", + " ----------\n", + " rec :\n", + " Receiver data with shape (time, points).\n", + " model : Model\n", + " Object that holds the velocity model.\n", + " t0 : int\n", + " Start of time dimension to plot.\n", + " tn : int\n", + " End of time dimension to plot.\n", + " file : str\n", + " Optional file name to also save the plot to.\n", + " \"\"\"\n", + " scale = np.max(rec) / 10.\n", + " extent = [model.origin[0], model.origin[0] + 1e-3*model.domain_size[0],\n", + " 1e-3*tn, t0]\n", + "\n", + " plot = plt.imshow(rec, vmin=-scale, vmax=scale, cmap=cm.gray, extent=extent)\n", + " plt.xlabel('X position (km)')\n", + " plt.ylabel('Time (s)')\n", + "\n", + " # Create aligned colorbar on the right\n", + " if colorbar:\n", + " ax = plt.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " plt.colorbar(plot, cax=cax)\n", + " plt.show() \n", + " \n", + " if file is not None:\n", + " plt.savefig(file)\n", + " print('plotted image saved as {} file'.format(file))\n", + " \n", + " plt.clf()\n", + "\n", + "def main(output_folder): \n", + " # 1. Define the physical problem\n", + " # The first step is to define the physical model:\n", + " # - physical dimensions of interest\n", + " # - velocity profile of this physical domain\n", + "\n", + " # Define a physical size\n", + " shape = (101, 101) # Number of grid points (nx, nz)\n", + " spacing = (10., 10.) # Grid spacing in m. The domain size is now 1km by 1km\n", + " origin = (0., 0.) # Location of the top left corner. 
This is necessary to define\n", + " # the absolute location of the source and receivers\n", + "\n", + " # Define a velocity profile. The velocity is in km/s\n", + " v = np.empty(shape, dtype=np.float32)\n", + " v[:, :51] = 1.5\n", + " v[:, 51:] = 2.5\n", + "\n", + " # With the velocity and model size defined, we can create the seismic model that\n", + " # encapsulates these properties. We also define the size of the absorbing layer as 10 grid points\n", + " model = Model(vp=v, origin=origin, shape=shape, spacing=spacing,\n", + " space_order=2, nbpml=10)\n", + "\n", + " plot_velocity(model, \n", + " file= os.path.join(*( [output_folder,'output000.png'])))\n", + " \n", + " # 2. Acquisition geometry\n", + " t0 = 0. # Simulation starts at t=0\n", + " tn = 1000. # Simulation lasts 1 second (1000 ms)\n", + " dt = model.critical_dt # Time step from model grid spacing\n", + "\n", + " time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", + " from examples.seismic import RickerSource\n", + "\n", + " f0 = 0.010 # Source peak frequency is 10Hz (0.010 kHz)\n", + " src = RickerSource(name='src', grid=model.grid, f0=f0,\n", + " npoint=1, time_range=time_range)\n", + "\n", + " # First, position the source centrally in all dimensions, then set depth\n", + " src.coordinates.data[0, :] = np.array(model.domain_size) * .5\n", + " src.coordinates.data[0, -1] = 20. # Depth is 20m\n", + "\n", + " # We could plot the time signature to see the wavelet\n", + "# src.show()\n", + "\n", + " # Create symbol for 101 receivers\n", + " rec = Receiver(name='rec', grid=model.grid, npoint=101, time_range=time_range)\n", + "\n", + " # Prescribe even spacing for receivers along the x-axis\n", + " rec.coordinates.data[:, 0] = np.linspace(0, model.domain_size[0], num=101)\n", + " rec.coordinates.data[:, 1] = 20. # Depth is 20m\n", + "\n", + " # We can now show the source and receivers within our domain:\n", + " # Red dot: Source location\n", + " # Green dots: Receiver locations (every 4th point)\n", + " plot_velocity(model, source=src.coordinates.data,\n", + " receiver=rec.coordinates.data[::4, :], \n", + " file= os.path.join(*( [output_folder,'output010.png'])))\n", + " \n", + " # Define the wavefield with the size of the model and the time dimension\n", + " u = TimeFunction(name=\"u\", grid=model.grid, time_order=2, space_order=2)\n", + "\n", + " # We can now write the PDE\n", + " pde = model.m * u.dt2 - u.laplace + model.damp * u.dt\n", + "\n", + " # The PDE representation is as on paper\n", + " pde\n", + " \n", + " # This discrete PDE can be solved in a time-marching way, updating u(t+dt) from the previous time step\n", + " # Devito has a shortcut for u(t+dt), which is u.forward. 
We can then rewrite the PDE as \n", + " # a time-marching update equation known as a stencil, using customized SymPy functions\n", + "\n", + " stencil = Eq(u.forward, solve(pde, u.forward))\n", + " # Finally we define the source injection and receiver read function to generate the corresponding code\n", + " src_term = src.inject(field=u.forward, expr=src * dt**2 / model.m)\n", + "\n", + " # Create interpolation expression for receivers\n", + " rec_term = rec.interpolate(expr=u.forward)\n", + "\n", + " op = Operator([stencil] + src_term + rec_term, subs=model.spacing_map)\n", + " \n", + " op(time=time_range.num-1, dt=model.critical_dt)\n", + " plot_shotrecord(rec.data, model, t0, tn, \n", + " file= os.path.join(*( [output_folder,'output020.png'])))\n", + "\n", + "if __name__ == \"__main__\":\n", + " parser = argparse.ArgumentParser()\n", + " parser.add_argument('--output_folder', type=str, nargs='?', \\\n", + " dest='output_folder', help='output artifacts location',\\\n", + " default='.')\n", + " args = parser.parse_args()\n", + " \n", + " main(args.output_folder)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Get the experimentation docker image for devito" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'fwi01_azureml:sdk.v1.0.76'" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'fwi01acr.azurecr.io/fwi01_azureml:sdk.v1.0.76'" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "docker_repo_name = os.getenv('ACR_NAME')+'.azurecr.io' # or os.getenv('DOCKER_LOGIN')\n", + "docker_image_name = os.getenv('EXPERIMENTATION_DOCKER_IMAGE_NAME')\n", + "\n", + "image_version = os.getenv('EXPERIMENTATION_DOCKER_IMAGE_TAG')\n", + "if image_version!=\"\":\n", + " docker_image_name = docker_image_name +':'+ image_version\n", + "\n", + "full_docker_image_name = docker_repo_name + '/' + docker_image_name\n", + " \n", + "docker_image_name\n", + "full_docker_image_name" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Extract/decide the Python path in the custom Docker image that corresponds to the desired conda environment. Without this, AzureML tries to create a separate environment." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Login Succeeded\r\n", + "WARNING! Your password will be stored unencrypted in /home/loginvm022/.docker/config.json.\r\n", + "Configure a credential helper to remove this warning. See\r\n", + "https://docs.docker.com/engine/reference/commandline/login/#credentials-store\r\n", + "\r\n", + "\u001b[0m" + ] + } + ], + "source": [ + "%dotenv $dotenv_file_path\n", + "cli_command='az acr login --name '+\\\n", + "os.getenv('ACR_NAME')+\\\n", + "' --username '+os.getenv('ACR_USERNAME') + ' --password ' + os.getenv('ACR_PASSWORD')\n", + "! 
$cli_command" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'docker run -i --rm --name fwi01_azureml_container02 fwi01acr.azurecr.io/fwi01_azureml:sdk.v1.0.76 /bin/bash -c \"which python\" '" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'/opt/conda/envs/fwi01_conda_env/bin/python'" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "get_Python_path_command='docker run -i --rm --name fwi01_azureml_container02 '+ \\\n", + "full_docker_image_name + \\\n", + "' /bin/bash -c \"which python\" '\n", + "get_Python_path_command\n", + "\n", + "\n", + "import subprocess\n", + "python_path_in_docker_image = subprocess.check_output(get_Python_path_command,shell=True,stderr=subprocess.STDOUT).\\\n", + "decode('utf-8').strip()\n", + "python_path_in_docker_image" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "#### Create azureml_script_file that invokes:\n", + " - devito exclusive custom edited training_script_file\n", + " - unedited devito notebooks via papermill (invoked via cli and via ppapermill python API)\n", + "\n", + "[Back](#devito_in_AzureML_demoing_modes) to notebook summary." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting ./../temp/devito_tutorial/azureml_01_modelling.py\n" + ] + } + ], + "source": [ + "%%writefile $azureml_training_script_full_file_name\n", + "\n", + "import argparse\n", + "import os\n", + "os.system('conda env list')\n", + "\n", + "import azureml.core;\n", + "from azureml.core.run import Run\n", + "\n", + "print(azureml.core.VERSION)\n", + "\n", + "parser = argparse.ArgumentParser()\n", + "parser.add_argument('--output_folder', type=str, dest='output_folder', help='ouput artifacts location')\n", + "\n", + "args = parser.parse_args()\n", + "print('args.output_folder is {} but it will be ignored since AzureML_tracked ./outputs will be used'.format(args.output_folder))\n", + "\n", + "# get the Azure ML run object\n", + "run = Run.get_context()\n", + "\n", + "# ./outputs/ folder is autotracked so should get uploaded at the end of the run\n", + "output_dir_AzureML_tracked = './outputs'\n", + "\n", + "crt_dir = os.getcwd()\n", + "\n", + "cli_command= \\\n", + "'cd /devito; /opt/conda/envs/fwi01_conda_env/bin/python '+ crt_dir +'/01_modelling.py' + \\\n", + "' --output_folder '+ crt_dir + output_dir_AzureML_tracked+ '/' + \\\n", + "' > '+ crt_dir + output_dir_AzureML_tracked + '/01_modelling.log' \n", + "# + \\\n", + "# ' 2>&1 ' + crt_dir +'/'+ output_dir_AzureML_tracked + '/devito_cli_py.log'\n", + "print('Running devito from cli on 01_modelling.py----BEGIN-----:') \n", + "print(cli_command); print('\\n');os.system(cli_command)\n", + "print('Running devito from cli on 01_modelling.py----END-----:\\n\\n')\n", + "\n", + "cli_command= \\\n", + "'cd /devito; papermill ' + \\\n", + "'./examples/seismic/tutorials/02_rtm.ipynb '+\\\n", + "crt_dir +'/outputs/02_rtm_output.ipynb ' + \\\n", + "'--log-output --no-progress-bar --kernel python3 ' + \\\n", + "' > '+ crt_dir + output_dir_AzureML_tracked + '/02_rtm_output.log' \n", + "# + \\\n", + "# ' 2>&1 ' + crt_dir +'/'+ output_dir_AzureML_tracked + '/papermill_cli.log'\n", + "\n", + "# FIXME - activate right conda env for running papermill from cli\n", + 
"activate_right_conda_env_fixed = False\n", + "if activate_right_conda_env_fixed:\n", + " print('Running papermill from cli on 02_rtm.ipynb----BEGIN-----:') \n", + " print(cli_command); print('\\n');os.system(cli_command)\n", + " print('Running papermill from cli on 02_rtm.ipynb----END-----:\\n\\n') \n", + "\n", + "\n", + "print('Running papermill from Python API on 03_fwi.ipynb----BEGIN-----:') \n", + "import papermill as pm\n", + "os.chdir('/devito')\n", + "pm.execute_notebook(\n", + " './examples/seismic/tutorials/03_fwi.ipynb',\n", + " crt_dir +'/outputs/03_fwi_output.ipynb'\n", + ")\n", + "print('Running papermill from Python API on 03_fwi.ipynb----END-----:') \n", + "\n", + "print('Running papermill from Python API on 04_dask.ipynb----BEGIN-----:') \n", + "import papermill as pm\n", + "os.chdir('/devito')\n", + "pm.execute_notebook(\n", + " './examples/seismic/tutorials/04_dask.ipynb',\n", + " crt_dir +'/outputs/04_dask_output.ipynb'\n", + ")\n", + "print('Running papermill from Python API on 04_dask.ipynb----END-----:') \n", + " \n", + "\n", + "os.system('pwd')\n", + "os.system('ls -l /')\n", + "os.system('ls -l ./')\n", + "os.system('ls -l ' +crt_dir + output_dir_AzureML_tracked)\n", + "run.log('training_message01: ', 'finished experiment')\n", + "print('\\n')" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['azureml_01_modelling.py', '01_modelling.py']" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "script_path=os.path.join(*(script_folder))\n", + "os.listdir(script_path)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Initialize workspace\n", + "\n", + "Initialize a workspace object from persisted configuration. If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure the config file is present at .\\config.json" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING - Warning: Falling back to use azure cli login credentials.\n", + "If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.\n", + "Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Workspace name: ghiordanfwiws\n", + "Azure region: eastus2\n", + "Subscription id: 7899\n" + ] + } + ], + "source": [ + "ws = Workspace.from_config(\n", + " path=os.path.join(os.getcwd(),\n", + " os.path.join(*([workspace_config_dir, '.azureml', workspace_config_file]))))\n", + "print('Workspace name: ' + ws.name, \n", + " 'Azure region: ' + ws.location, \n", + " 'Subscription id: ' + ws.subscription_id[0:4], sep = '\\n')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create an Azure ML experiment\n", + "Let's create an experiment named \"tf-mnist\" and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "exp = Experiment(workspace=ws, name=experimentName)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Retrieve or create an Azure Machine Learning compute\n", + "Azure Machine Learning Compute is a service for provisioning and managing clusters of Azure virtual machines for running machine learning workloads. Let's create a new Azure Machine Learning Compute in the current workspace, if it doesn't already exist. We will then run the training script on this compute target.\n", + "\n", + "If the compute with the given name cannot be found in the workspace, a new one can be created. This process is broken down into the following steps:\n", + "\n", + "1. Create the configuration\n", + "2. Create the Azure Machine Learning compute\n", + "\n", + "**This process takes a few minutes and provides only sparse output while it runs. Please make sure to wait until the call returns before moving to the next cell.**" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'gpuclstfwi02'" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "gpu_cluster_name = os.getenv('GPU_CLUSTER_NAME')\n", + "gpu_cluster_name" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found existing gpu cluster\n" + ] + } + ], + "source": [ + "# Retrieve the cluster if it already exists\n", + "max_nodes_value = 5\n", + "try:\n", + " gpu_cluster = ComputeTarget(workspace=ws, name=gpu_cluster_name)\n", + " print(\"Found existing gpu cluster\")\n", + "except ComputeTargetException:\n", + " print(\"Could not find ComputeTarget cluster!\")\n", + " \n", + "# # Create a new gpu cluster using the code below\n", + "# # Specify the configuration for the new cluster\n", + "# compute_config = AmlCompute.provisioning_configuration(vm_size=\"Standard_NC6\",\n", + "# min_nodes=0,\n", + "# max_nodes=max_nodes_value)\n", + "# # Create the cluster with the specified name and configuration\n", + "# gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, compute_config)\n", + "\n", + "# # Wait for cluster provisioning to complete, show the output log\n", + "# gpu_cluster.wait_for_completion(show_output=True)\n", + " \n", + " \n", + "# for demo purposes, show how cluster properties can be altered post-creation\n", + "gpu_cluster.update(min_nodes=0, max_nodes=max_nodes_value, idle_seconds_before_scaledown=1200)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Create an Azure ML SDK estimator with a custom docker image " + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "565b952db744469fa2137b6c94e15f7a", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "_UserRunWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': False, 'log_level': 'NOTSET',…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/aml.mini.widget.v1": "{\"status\": \"Completed\", \"workbench_run_details_uri\": 
\"https://ml.azure.com/experiments/020_AzureMLEstimator/runs/020_AzureMLEstimator_1575674728_d40baeba?wsid=/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourcegroups/ghiordanfwirsg01/workspaces/ghiordanfwiws\", \"run_id\": \"020_AzureMLEstimator_1575674728_d40baeba\", \"run_properties\": {\"run_id\": \"020_AzureMLEstimator_1575674728_d40baeba\", \"created_utc\": \"2019-12-06T23:25:30.597858Z\", \"properties\": {\"_azureml.ComputeTargetType\": \"amlcompute\", \"ContentSnapshotId\": \"a5071b2a-37a7-40da-8340-69cc894091cb\", \"azureml.git.repository_uri\": \"git@github.com:georgeAccnt-GH/DeepSeismic.git\", \"mlflow.source.git.repoURL\": \"git@github.com:georgeAccnt-GH/DeepSeismic.git\", \"azureml.git.branch\": \"staging\", \"mlflow.source.git.branch\": \"staging\", \"azureml.git.commit\": \"1d3cd3340f4063508b6f707d5fc2a35f5429a07f\", \"mlflow.source.git.commit\": \"1d3cd3340f4063508b6f707d5fc2a35f5429a07f\", \"azureml.git.dirty\": \"True\", \"ProcessInfoFile\": \"azureml-logs/process_info.json\", \"ProcessStatusFile\": \"azureml-logs/process_status.json\"}, \"tags\": {\"_aml_system_ComputeTargetStatus\": \"{\\\"AllocationState\\\":\\\"steady\\\",\\\"PreparingNodeCount\\\":1,\\\"RunningNodeCount\\\":0,\\\"CurrentNodeCount\\\":1}\"}, \"script_name\": null, \"arguments\": null, \"end_time_utc\": \"2019-12-06T23:34:26.039772Z\", \"status\": \"Completed\", \"log_files\": {\"azureml-logs/55_azureml-execution-tvmps_d8d8a91061fed6f3a36a0e0da11655ae12488195551133265afca81050ad2db4_d.txt\": \"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575674728_d40baeba/azureml-logs/55_azureml-execution-tvmps_d8d8a91061fed6f3a36a0e0da11655ae12488195551133265afca81050ad2db4_d.txt?sv=2019-02-02&sr=b&sig=1Fz2ltrBSXhF9tDzTuEOv35mBsOLsf%2BCVuTEuSCRWdg%3D&st=2019-12-06T23%3A24%3A44Z&se=2019-12-07T07%3A34%3A44Z&sp=r\", \"azureml-logs/65_job_prep-tvmps_d8d8a91061fed6f3a36a0e0da11655ae12488195551133265afca81050ad2db4_d.txt\": \"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575674728_d40baeba/azureml-logs/65_job_prep-tvmps_d8d8a91061fed6f3a36a0e0da11655ae12488195551133265afca81050ad2db4_d.txt?sv=2019-02-02&sr=b&sig=PwHIdkWadtTAj29WuPOCF3g0RSrWdriOmKhqdjZNm3I%3D&st=2019-12-06T23%3A24%3A44Z&se=2019-12-07T07%3A34%3A44Z&sp=r\", \"azureml-logs/70_driver_log.txt\": \"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575674728_d40baeba/azureml-logs/70_driver_log.txt?sv=2019-02-02&sr=b&sig=Iz8WkiOv%2BkEXeOox8p3P8XkLIdb8pjhCO%2Bo8slYUBGk%3D&st=2019-12-06T23%3A24%3A44Z&se=2019-12-07T07%3A34%3A44Z&sp=r\", \"azureml-logs/75_job_post-tvmps_d8d8a91061fed6f3a36a0e0da11655ae12488195551133265afca81050ad2db4_d.txt\": \"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575674728_d40baeba/azureml-logs/75_job_post-tvmps_d8d8a91061fed6f3a36a0e0da11655ae12488195551133265afca81050ad2db4_d.txt?sv=2019-02-02&sr=b&sig=gz88u5ZC%2B7N8QospVRIL8zd%2FEyQKbljoZXQD01jAyXM%3D&st=2019-12-06T23%3A24%3A44Z&se=2019-12-07T07%3A34%3A44Z&sp=r\", \"azureml-logs/process_info.json\": \"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575674728_d40baeba/azureml-logs/process_info.json?sv=2019-02-02&sr=b&sig=4nj2pjm1rtKIjBmyudNaBEX6ITd3Gm%2BQLEUgjDYVBIc%3D&st=2019-12-06T23%3A24%3A44Z&se=2019-12-07T07%3A34%3A44Z&sp=r\", \"azureml-logs/process_status.json\": 
\"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575674728_d40baeba/azureml-logs/process_status.json?sv=2019-02-02&sr=b&sig=NQLsveMtGHBEYsmiwoPvPpOv%2B6wabnQp2IwDrVjh49Q%3D&st=2019-12-06T23%3A24%3A44Z&se=2019-12-07T07%3A34%3A44Z&sp=r\", \"logs/azureml/729_azureml.log\": \"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575674728_d40baeba/logs/azureml/729_azureml.log?sv=2019-02-02&sr=b&sig=HpwLZSHX0J%2B2eWILTIDA7%2BmpVIEF0%2BIFfM2LHgYGk8w%3D&st=2019-12-06T23%3A24%3A43Z&se=2019-12-07T07%3A34%3A43Z&sp=r\", \"logs/azureml/azureml.log\": \"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575674728_d40baeba/logs/azureml/azureml.log?sv=2019-02-02&sr=b&sig=g%2Fi60CvATRGwaeQM9b6QihJxeFX0jTl%2BOKELCYYQ3rM%3D&st=2019-12-06T23%3A24%3A43Z&se=2019-12-07T07%3A34%3A43Z&sp=r\"}, \"log_groups\": [[\"azureml-logs/process_info.json\", \"azureml-logs/process_status.json\", \"logs/azureml/azureml.log\"], [\"azureml-logs/55_azureml-execution-tvmps_d8d8a91061fed6f3a36a0e0da11655ae12488195551133265afca81050ad2db4_d.txt\"], [\"azureml-logs/65_job_prep-tvmps_d8d8a91061fed6f3a36a0e0da11655ae12488195551133265afca81050ad2db4_d.txt\"], [\"azureml-logs/70_driver_log.txt\"], [\"azureml-logs/75_job_post-tvmps_d8d8a91061fed6f3a36a0e0da11655ae12488195551133265afca81050ad2db4_d.txt\"], [\"logs/azureml/729_azureml.log\"]], \"run_duration\": \"0:08:55\"}, \"child_runs\": [], \"children_metrics\": {}, \"run_metrics\": [{\"name\": \"training_message01: \", \"run_id\": \"020_AzureMLEstimator_1575674728_d40baeba\", \"categories\": [0], \"series\": [{\"data\": [\"finished experiment\"]}]}], \"run_logs\": \"2019-12-06 23:32:41,989|azureml|DEBUG|Inputs:: kwargs: {'OutputCollection': True, 'snapshotProject': True, 'only_in_process_features': True, 'skip_track_logs_dir': True}, track_folders: None, deny_list: None, directories_to_watch: []\\n2019-12-06 23:32:41,989|azureml.history._tracking.PythonWorkingDirectory|DEBUG|Execution target type: batchai\\n2019-12-06 23:32:41,990|azureml.history._tracking.PythonWorkingDirectory|DEBUG|Failed to import pyspark with error: No module named 'pyspark'\\n2019-12-06 23:32:41,990|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|Pinning working directory for filesystems: ['pyfs']\\n2019-12-06 23:32:42,323|azureml._base_sdk_common.user_agent|DEBUG|Fetching client info from /root/.azureml/clientinfo.json\\n2019-12-06 23:32:42,323|azureml._base_sdk_common.user_agent|DEBUG|Error loading client info: [Errno 2] No such file or directory: '/root/.azureml/clientinfo.json'\\n2019-12-06 23:32:42,721|azureml.core._experiment_method|DEBUG|Trying to register submit_function search, on method \\n2019-12-06 23:32:42,721|azureml.core._experiment_method|DEBUG|Registered submit_function search, on method \\n2019-12-06 23:32:42,722|azureml.core._experiment_method|DEBUG|Trying to register submit_function search, on method \\n2019-12-06 23:32:42,722|azureml.core._experiment_method|DEBUG|Registered submit_function search, on method \\n2019-12-06 23:32:42,722|azureml.core.run|DEBUG|Adding new factory for run source hyperdrive\\n2019-12-06 23:32:43,300|azureml.core.run|DEBUG|Adding new factory for run source azureml.PipelineRun\\n2019-12-06 23:32:43,306|azureml.core.run|DEBUG|Adding new factory for run source azureml.ReusedStepRun\\n2019-12-06 23:32:43,311|azureml.core.run|DEBUG|Adding new factory for run source azureml.StepRun\\n2019-12-06 
23:32:43,316|azureml.core.run|DEBUG|Adding new factory for run source azureml.scriptrun\\n2019-12-06 23:32:43,318|azureml.core.authentication.TokenRefresherDaemon|DEBUG|Starting daemon and triggering first instance\\n2019-12-06 23:32:43,324|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-06 23:32:43,325|azureml._restclient.clientbase|INFO|Created a worker pool for first use\\n2019-12-06 23:32:43,325|azureml.core.authentication|DEBUG|Time to expire 1813966.674698 seconds\\n2019-12-06 23:32:43,325|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:43,325|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:43,325|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:43,325|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:43,325|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:43,325|azureml._base_sdk_common.service_discovery|DEBUG|Constructing mms service url in from history url environment variable None, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:43,326|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:43,326|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:43,326|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:43,356|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:43,361|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-06 23:32:43,369|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-06 23:32:43,374|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-06 23:32:43,379|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-06 23:32:43,385|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-06 23:32:43,385|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.RunClient.get-async:False|DEBUG|[START]\\n2019-12-06 23:32:43,386|msrest.service_client|DEBUG|Accept header absent 
and forced to application/json\\n2019-12-06 23:32:43,386|msrest.http_logger|DEBUG|Request URL: 'https://eastus2.experiments.azureml.net/history/v1.0/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourceGroups/ghiordanfwirsg01/providers/Microsoft.MachineLearningServices/workspaces/ghiordanfwiws/experiments/020_AzureMLEstimator/runs/020_AzureMLEstimator_1575674728_d40baeba'\\n2019-12-06 23:32:43,387|msrest.http_logger|DEBUG|Request method: 'GET'\\n2019-12-06 23:32:43,387|msrest.http_logger|DEBUG|Request headers:\\n2019-12-06 23:32:43,387|msrest.http_logger|DEBUG| 'Accept': 'application/json'\\n2019-12-06 23:32:43,387|msrest.http_logger|DEBUG| 'Content-Type': 'application/json; charset=utf-8'\\n2019-12-06 23:32:43,387|msrest.http_logger|DEBUG| 'x-ms-client-request-id': '2a72fb1c-fdba-4e6d-a244-7315dcdf5d54'\\n2019-12-06 23:32:43,387|msrest.http_logger|DEBUG| 'request-id': '2a72fb1c-fdba-4e6d-a244-7315dcdf5d54'\\n2019-12-06 23:32:43,387|msrest.http_logger|DEBUG| 'User-Agent': 'python/3.6.9 (Linux-4.15.0-1057-azure-x86_64-with-debian-10.0) msrest/0.6.10 azureml._restclient/core.1.0.76'\\n2019-12-06 23:32:43,387|msrest.http_logger|DEBUG|Request body:\\n2019-12-06 23:32:43,387|msrest.http_logger|DEBUG|None\\n2019-12-06 23:32:43,387|msrest.universal_http|DEBUG|Configuring redirects: allow=True, max=30\\n2019-12-06 23:32:43,387|msrest.universal_http|DEBUG|Configuring request: timeout=100, verify=True, cert=None\\n2019-12-06 23:32:43,387|msrest.universal_http|DEBUG|Configuring proxies: ''\\n2019-12-06 23:32:43,387|msrest.universal_http|DEBUG|Evaluate proxies against ENV settings: True\\n2019-12-06 23:32:43,442|msrest.http_logger|DEBUG|Response status: 200\\n2019-12-06 23:32:43,443|msrest.http_logger|DEBUG|Response headers:\\n2019-12-06 23:32:43,443|msrest.http_logger|DEBUG| 'Date': 'Fri, 06 Dec 2019 23:32:43 GMT'\\n2019-12-06 23:32:43,443|msrest.http_logger|DEBUG| 'Content-Type': 'application/json; charset=utf-8'\\n2019-12-06 23:32:43,443|msrest.http_logger|DEBUG| 'Transfer-Encoding': 'chunked'\\n2019-12-06 23:32:43,443|msrest.http_logger|DEBUG| 'Connection': 'keep-alive'\\n2019-12-06 23:32:43,443|msrest.http_logger|DEBUG| 'Vary': 'Accept-Encoding'\\n2019-12-06 23:32:43,443|msrest.http_logger|DEBUG| 'Request-Context': 'appId=cid-v1:2d2e8e63-272e-4b3c-8598-4ee570a0e70d'\\n2019-12-06 23:32:43,443|msrest.http_logger|DEBUG| 'x-ms-client-request-id': '2a72fb1c-fdba-4e6d-a244-7315dcdf5d54'\\n2019-12-06 23:32:43,444|msrest.http_logger|DEBUG| 'x-ms-client-session-id': ''\\n2019-12-06 23:32:43,444|msrest.http_logger|DEBUG| 'Strict-Transport-Security': 'max-age=15724800; includeSubDomains; preload'\\n2019-12-06 23:32:43,444|msrest.http_logger|DEBUG| 'X-Content-Type-Options': 'nosniff'\\n2019-12-06 23:32:43,444|msrest.http_logger|DEBUG| 'Content-Encoding': 'gzip'\\n2019-12-06 23:32:43,444|msrest.http_logger|DEBUG|Response content:\\n2019-12-06 23:32:43,444|msrest.http_logger|DEBUG|{\\n \\\"runNumber\\\": 1516,\\n \\\"rootRunId\\\": \\\"020_AzureMLEstimator_1575674728_d40baeba\\\",\\n \\\"experimentId\\\": \\\"8d96276b-f420-4a67-86be-f933dd3d38cd\\\",\\n \\\"createdUtc\\\": \\\"2019-12-06T23:25:30.5978583+00:00\\\",\\n \\\"createdBy\\\": {\\n \\\"userObjectId\\\": \\\"b77869a0-66f2-4288-89ef-13c10accc4dc\\\",\\n \\\"userPuId\\\": \\\"1003000090A95868\\\",\\n \\\"userIdp\\\": null,\\n \\\"userAltSecId\\\": null,\\n \\\"userIss\\\": \\\"https://sts.windows.net/72f988bf-86f1-41af-91ab-2d7cd011db47/\\\",\\n \\\"userTenantId\\\": \\\"72f988bf-86f1-41af-91ab-2d7cd011db47\\\",\\n \\\"userName\\\": \\\"George 
Iordanescu\\\"\\n },\\n \\\"userId\\\": \\\"b77869a0-66f2-4288-89ef-13c10accc4dc\\\",\\n \\\"token\\\": null,\\n \\\"tokenExpiryTimeUtc\\\": null,\\n \\\"error\\\": null,\\n \\\"warnings\\\": null,\\n \\\"revision\\\": 10,\\n \\\"runId\\\": \\\"020_AzureMLEstimator_1575674728_d40baeba\\\",\\n \\\"parentRunId\\\": null,\\n \\\"status\\\": \\\"Running\\\",\\n \\\"startTimeUtc\\\": \\\"2019-12-06T23:30:15.4122862+00:00\\\",\\n \\\"endTimeUtc\\\": null,\\n \\\"heartbeatEnabled\\\": false,\\n \\\"options\\\": {\\n \\\"generateDataContainerIdIfNotSpecified\\\": true\\n },\\n \\\"name\\\": null,\\n \\\"dataContainerId\\\": \\\"dcid.020_AzureMLEstimator_1575674728_d40baeba\\\",\\n \\\"description\\\": null,\\n \\\"hidden\\\": false,\\n \\\"runType\\\": \\\"azureml.scriptrun\\\",\\n \\\"properties\\\": {\\n \\\"_azureml.ComputeTargetType\\\": \\\"amlcompute\\\",\\n \\\"ContentSnapshotId\\\": \\\"a5071b2a-37a7-40da-8340-69cc894091cb\\\",\\n \\\"azureml.git.repository_uri\\\": \\\"git@github.com:georgeAccnt-GH/DeepSeismic.git\\\",\\n \\\"mlflow.source.git.repoURL\\\": \\\"git@github.com:georgeAccnt-GH/DeepSeismic.git\\\",\\n \\\"azureml.git.branch\\\": \\\"staging\\\",\\n \\\"mlflow.source.git.branch\\\": \\\"staging\\\",\\n \\\"azureml.git.commit\\\": \\\"1d3cd3340f4063508b6f707d5fc2a35f5429a07f\\\",\\n \\\"mlflow.source.git.commit\\\": \\\"1d3cd3340f4063508b6f707d5fc2a35f5429a07f\\\",\\n \\\"azureml.git.dirty\\\": \\\"True\\\",\\n \\\"ProcessInfoFile\\\": \\\"azureml-logs/process_info.json\\\",\\n \\\"ProcessStatusFile\\\": \\\"azureml-logs/process_status.json\\\"\\n },\\n \\\"scriptName\\\": \\\"azureml_01_modelling.py\\\",\\n \\\"target\\\": \\\"gpuclstfwi02\\\",\\n \\\"tags\\\": {\\n \\\"_aml_system_ComputeTargetStatus\\\": \\\"{\\\\\\\"AllocationState\\\\\\\":\\\\\\\"steady\\\\\\\",\\\\\\\"PreparingNodeCount\\\\\\\":1,\\\\\\\"RunningNodeCount\\\\\\\":0,\\\\\\\"CurrentNodeCount\\\\\\\":1}\\\"\\n },\\n \\\"inputDatasets\\\": [],\\n \\\"runDefinition\\\": null,\\n \\\"createdFrom\\\": {\\n \\\"type\\\": \\\"Notebook\\\",\\n \\\"locationType\\\": \\\"ArtifactId\\\",\\n \\\"location\\\": \\\"LocalUpload/020_AzureMLEstimator_1575674728_d40baeba/020_UseAzureMLEstimatorForExperimentation_GeophysicsTutorial_FWI_Azure_devito.ipynb\\\"\\n },\\n \\\"cancelUri\\\": \\\"https://eastus2.experiments.azureml.net/execution/v1.0/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourceGroups/ghiordanfwirsg01/providers/Microsoft.MachineLearningServices/workspaces/ghiordanfwiws/experiments/020_AzureMLEstimator/runId/020_AzureMLEstimator_1575674728_d40baeba/cancel\\\",\\n \\\"completeUri\\\": null,\\n \\\"diagnosticsUri\\\": \\\"https://eastus2.experiments.azureml.net/execution/v1.0/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourceGroups/ghiordanfwirsg01/providers/Microsoft.MachineLearningServices/workspaces/ghiordanfwiws/experiments/020_AzureMLEstimator/runId/020_AzureMLEstimator_1575674728_d40baeba/diagnostics\\\",\\n \\\"computeRequest\\\": {\\n \\\"nodeCount\\\": 1\\n },\\n \\\"retainForLifetimeOfWorkspace\\\": false\\n}\\n2019-12-06 23:32:43,449|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.RunClient.get-async:False|DEBUG|[STOP]\\n2019-12-06 23:32:43,450|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba|DEBUG|Constructing run from dto. 
type: azureml.scriptrun, source: None, props: {'_azureml.ComputeTargetType': 'amlcompute', 'ContentSnapshotId': 'a5071b2a-37a7-40da-8340-69cc894091cb', 'azureml.git.repository_uri': 'git@github.com:georgeAccnt-GH/DeepSeismic.git', 'mlflow.source.git.repoURL': 'git@github.com:georgeAccnt-GH/DeepSeismic.git', 'azureml.git.branch': 'staging', 'mlflow.source.git.branch': 'staging', 'azureml.git.commit': '1d3cd3340f4063508b6f707d5fc2a35f5429a07f', 'mlflow.source.git.commit': '1d3cd3340f4063508b6f707d5fc2a35f5429a07f', 'azureml.git.dirty': 'True', 'ProcessInfoFile': 'azureml-logs/process_info.json', 'ProcessStatusFile': 'azureml-logs/process_status.json'}\\n2019-12-06 23:32:43,450|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunContextManager|DEBUG|Valid logs dir, setting up content loader\\n2019-12-06 23:32:43,451|azureml|WARNING|Could not import azureml.mlflow or azureml.contrib.mlflow mlflow APIs will not run against AzureML services. Add azureml-mlflow as a conda dependency for the run if this behavior is desired\\n2019-12-06 23:32:43,451|azureml.WorkerPool|DEBUG|[START]\\n2019-12-06 23:32:43,451|azureml.SendRunKillSignal|DEBUG|[START]\\n2019-12-06 23:32:43,451|azureml.RunStatusContext|DEBUG|[START]\\n2019-12-06 23:32:43,451|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunContextManager.RunStatusContext|DEBUG|[START]\\n2019-12-06 23:32:43,451|azureml.WorkingDirectoryCM|DEBUG|[START]\\n2019-12-06 23:32:43,451|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|[START]\\n2019-12-06 23:32:43,451|azureml.history._tracking.PythonWorkingDirectory|INFO|Current working dir: /mnt/batch/tasks/shared/LS_root/jobs/ghiordanfwiws/azureml/020_azuremlestimator_1575674728_d40baeba/mounts/workspaceblobstore/azureml/020_AzureMLEstimator_1575674728_d40baeba\\n2019-12-06 23:32:43,451|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|Calling pyfs\\n2019-12-06 23:32:43,451|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|Storing working dir for pyfs as /mnt/batch/tasks/shared/LS_root/jobs/ghiordanfwiws/azureml/020_azuremlestimator_1575674728_d40baeba/mounts/workspaceblobstore/azureml/020_AzureMLEstimator_1575674728_d40baeba\\n2019-12-06 23:32:45,592|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:45,592|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:45,592|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:45,592|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:45,592|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:45,592|azureml._base_sdk_common.service_discovery|DEBUG|Constructing mms service url in from history url environment variable None, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 
23:32:45,592|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:45,593|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:45,593|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-06 23:32:45,599|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-06 23:32:45,600|azureml._run_impl.run_history_facade|DEBUG|Created a static thread pool for RunHistoryFacade class\\n2019-12-06 23:32:45,605|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-06 23:32:45,610|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-06 23:32:45,616|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-06 23:32:45,621|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-06 23:32:45,622|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.RunClient.get-async:False|DEBUG|[START]\\n2019-12-06 23:32:45,622|msrest.service_client|DEBUG|Accept header absent and forced to application/json\\n2019-12-06 23:32:45,622|msrest.http_logger|DEBUG|Request URL: 'https://eastus2.experiments.azureml.net/history/v1.0/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourceGroups/ghiordanfwirsg01/providers/Microsoft.MachineLearningServices/workspaces/ghiordanfwiws/experiments/020_AzureMLEstimator/runs/020_AzureMLEstimator_1575674728_d40baeba'\\n2019-12-06 23:32:45,622|msrest.http_logger|DEBUG|Request method: 'GET'\\n2019-12-06 23:32:45,622|msrest.http_logger|DEBUG|Request headers:\\n2019-12-06 23:32:45,622|msrest.http_logger|DEBUG| 'Accept': 'application/json'\\n2019-12-06 23:32:45,622|msrest.http_logger|DEBUG| 'Content-Type': 'application/json; charset=utf-8'\\n2019-12-06 23:32:45,623|msrest.http_logger|DEBUG| 'x-ms-client-request-id': '7502a986-27e5-47c2-8a48-e5501a0dda7c'\\n2019-12-06 23:32:45,623|msrest.http_logger|DEBUG| 'request-id': '7502a986-27e5-47c2-8a48-e5501a0dda7c'\\n2019-12-06 23:32:45,623|msrest.http_logger|DEBUG| 'User-Agent': 'python/3.6.9 (Linux-4.15.0-1057-azure-x86_64-with-debian-10.0) msrest/0.6.10 azureml._restclient/core.1.0.76'\\n2019-12-06 23:32:45,623|msrest.http_logger|DEBUG|Request body:\\n2019-12-06 23:32:45,623|msrest.http_logger|DEBUG|None\\n2019-12-06 23:32:45,623|msrest.universal_http|DEBUG|Configuring redirects: allow=True, max=30\\n2019-12-06 23:32:45,623|msrest.universal_http|DEBUG|Configuring request: timeout=100, verify=True, cert=None\\n2019-12-06 23:32:45,623|msrest.universal_http|DEBUG|Configuring proxies: ''\\n2019-12-06 23:32:45,623|msrest.universal_http|DEBUG|Evaluate proxies against ENV settings: True\\n2019-12-06 23:32:46,018|msrest.http_logger|DEBUG|Response status: 200\\n2019-12-06 23:32:46,018|msrest.http_logger|DEBUG|Response headers:\\n2019-12-06 23:32:46,018|msrest.http_logger|DEBUG| 'Date': 'Fri, 06 Dec 2019 23:32:46 GMT'\\n2019-12-06 23:32:46,019|msrest.http_logger|DEBUG| 'Content-Type': 
'application/json; charset=utf-8'\\n2019-12-06 23:32:46,019|msrest.http_logger|DEBUG| 'Transfer-Encoding': 'chunked'\\n2019-12-06 23:32:46,019|msrest.http_logger|DEBUG| 'Connection': 'keep-alive'\\n2019-12-06 23:32:46,019|msrest.http_logger|DEBUG| 'Vary': 'Accept-Encoding'\\n2019-12-06 23:32:46,019|msrest.http_logger|DEBUG| 'Request-Context': 'appId=cid-v1:2d2e8e63-272e-4b3c-8598-4ee570a0e70d'\\n2019-12-06 23:32:46,019|msrest.http_logger|DEBUG| 'x-ms-client-request-id': '7502a986-27e5-47c2-8a48-e5501a0dda7c'\\n2019-12-06 23:32:46,019|msrest.http_logger|DEBUG| 'x-ms-client-session-id': ''\\n2019-12-06 23:32:46,019|msrest.http_logger|DEBUG| 'Strict-Transport-Security': 'max-age=15724800; includeSubDomains; preload'\\n2019-12-06 23:32:46,019|msrest.http_logger|DEBUG| 'X-Content-Type-Options': 'nosniff'\\n2019-12-06 23:32:46,019|msrest.http_logger|DEBUG| 'Content-Encoding': 'gzip'\\n2019-12-06 23:32:46,019|msrest.http_logger|DEBUG|Response content:\\n2019-12-06 23:32:46,019|msrest.http_logger|DEBUG|{\\n \\\"runNumber\\\": 1516,\\n \\\"rootRunId\\\": \\\"020_AzureMLEstimator_1575674728_d40baeba\\\",\\n \\\"experimentId\\\": \\\"8d96276b-f420-4a67-86be-f933dd3d38cd\\\",\\n \\\"createdUtc\\\": \\\"2019-12-06T23:25:30.5978583+00:00\\\",\\n \\\"createdBy\\\": {\\n \\\"userObjectId\\\": \\\"b77869a0-66f2-4288-89ef-13c10accc4dc\\\",\\n \\\"userPuId\\\": \\\"1003000090A95868\\\",\\n \\\"userIdp\\\": null,\\n \\\"userAltSecId\\\": null,\\n \\\"userIss\\\": \\\"https://sts.windows.net/72f988bf-86f1-41af-91ab-2d7cd011db47/\\\",\\n \\\"userTenantId\\\": \\\"72f988bf-86f1-41af-91ab-2d7cd011db47\\\",\\n \\\"userName\\\": \\\"George Iordanescu\\\"\\n },\\n \\\"userId\\\": \\\"b77869a0-66f2-4288-89ef-13c10accc4dc\\\",\\n \\\"token\\\": null,\\n \\\"tokenExpiryTimeUtc\\\": null,\\n \\\"error\\\": null,\\n \\\"warnings\\\": null,\\n \\\"revision\\\": 10,\\n \\\"runId\\\": \\\"020_AzureMLEstimator_1575674728_d40baeba\\\",\\n \\\"parentRunId\\\": null,\\n \\\"status\\\": \\\"Running\\\",\\n \\\"startTimeUtc\\\": \\\"2019-12-06T23:30:15.4122862+00:00\\\",\\n \\\"endTimeUtc\\\": null,\\n \\\"heartbeatEnabled\\\": false,\\n \\\"options\\\": {\\n \\\"generateDataContainerIdIfNotSpecified\\\": true\\n },\\n \\\"name\\\": null,\\n \\\"dataContainerId\\\": \\\"dcid.020_AzureMLEstimator_1575674728_d40baeba\\\",\\n \\\"description\\\": null,\\n \\\"hidden\\\": false,\\n \\\"runType\\\": \\\"azureml.scriptrun\\\",\\n \\\"properties\\\": {\\n \\\"_azureml.ComputeTargetType\\\": \\\"amlcompute\\\",\\n \\\"ContentSnapshotId\\\": \\\"a5071b2a-37a7-40da-8340-69cc894091cb\\\",\\n \\\"azureml.git.repository_uri\\\": \\\"git@github.com:georgeAccnt-GH/DeepSeismic.git\\\",\\n \\\"mlflow.source.git.repoURL\\\": \\\"git@github.com:georgeAccnt-GH/DeepSeismic.git\\\",\\n \\\"azureml.git.branch\\\": \\\"staging\\\",\\n \\\"mlflow.source.git.branch\\\": \\\"staging\\\",\\n \\\"azureml.git.commit\\\": \\\"1d3cd3340f4063508b6f707d5fc2a35f5429a07f\\\",\\n \\\"mlflow.source.git.commit\\\": \\\"1d3cd3340f4063508b6f707d5fc2a35f5429a07f\\\",\\n \\\"azureml.git.dirty\\\": \\\"True\\\",\\n \\\"ProcessInfoFile\\\": \\\"azureml-logs/process_info.json\\\",\\n \\\"ProcessStatusFile\\\": \\\"azureml-logs/process_status.json\\\"\\n },\\n \\\"scriptName\\\": \\\"azureml_01_modelling.py\\\",\\n \\\"target\\\": \\\"gpuclstfwi02\\\",\\n \\\"tags\\\": {\\n \\\"_aml_system_ComputeTargetStatus\\\": 
\\\"{\\\\\\\"AllocationState\\\\\\\":\\\\\\\"steady\\\\\\\",\\\\\\\"PreparingNodeCount\\\\\\\":1,\\\\\\\"RunningNodeCount\\\\\\\":0,\\\\\\\"CurrentNodeCount\\\\\\\":1}\\\"\\n },\\n \\\"inputDatasets\\\": [],\\n \\\"runDefinition\\\": null,\\n \\\"createdFrom\\\": {\\n \\\"type\\\": \\\"Notebook\\\",\\n \\\"locationType\\\": \\\"ArtifactId\\\",\\n \\\"location\\\": \\\"LocalUpload/020_AzureMLEstimator_1575674728_d40baeba/020_UseAzureMLEstimatorForExperimentation_GeophysicsTutorial_FWI_Azure_devito.ipynb\\\"\\n },\\n \\\"cancelUri\\\": \\\"https://eastus2.experiments.azureml.net/execution/v1.0/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourceGroups/ghiordanfwirsg01/providers/Microsoft.MachineLearningServices/workspaces/ghiordanfwiws/experiments/020_AzureMLEstimator/runId/020_AzureMLEstimator_1575674728_d40baeba/cancel\\\",\\n \\\"completeUri\\\": null,\\n \\\"diagnosticsUri\\\": \\\"https://eastus2.experiments.azureml.net/execution/v1.0/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourceGroups/ghiordanfwirsg01/providers/Microsoft.MachineLearningServices/workspaces/ghiordanfwiws/experiments/020_AzureMLEstimator/runId/020_AzureMLEstimator_1575674728_d40baeba/diagnostics\\\",\\n \\\"computeRequest\\\": {\\n \\\"nodeCount\\\": 1\\n },\\n \\\"retainForLifetimeOfWorkspace\\\": false\\n}\\n2019-12-06 23:32:46,022|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.RunClient.get-async:False|DEBUG|[STOP]\\n2019-12-06 23:32:46,023|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba|DEBUG|Constructing run from dto. type: azureml.scriptrun, source: None, props: {'_azureml.ComputeTargetType': 'amlcompute', 'ContentSnapshotId': 'a5071b2a-37a7-40da-8340-69cc894091cb', 'azureml.git.repository_uri': 'git@github.com:georgeAccnt-GH/DeepSeismic.git', 'mlflow.source.git.repoURL': 'git@github.com:georgeAccnt-GH/DeepSeismic.git', 'azureml.git.branch': 'staging', 'mlflow.source.git.branch': 'staging', 'azureml.git.commit': '1d3cd3340f4063508b6f707d5fc2a35f5429a07f', 'mlflow.source.git.commit': '1d3cd3340f4063508b6f707d5fc2a35f5429a07f', 'azureml.git.dirty': 'True', 'ProcessInfoFile': 'azureml-logs/process_info.json', 'ProcessStatusFile': 'azureml-logs/process_status.json'}\\n2019-12-06 23:32:46,023|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunContextManager|DEBUG|Valid logs dir, setting up content loader\\n2019-12-06 23:33:13,322|azureml.core.authentication|DEBUG|Time to expire 1813936.677149 seconds\\n2019-12-06 23:33:43,323|azureml.core.authentication|DEBUG|Time to expire 1813906.67683 seconds\\n2019-12-06 23:33:57,866|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient|DEBUG|Overrides: Max batch size: 50, batch cushion: 5, Interval: 1.\\n2019-12-06 23:33:57,867|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.PostMetricsBatchDaemon|DEBUG|Starting daemon and triggering first instance\\n2019-12-06 23:33:57,867|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient|DEBUG|Used for use_batch=True.\\n2019-12-06 23:33:57,911|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|Calling pyfs\\n2019-12-06 23:33:57,911|azureml.history._tracking.PythonWorkingDirectory|INFO|Current working dir: /devito\\n2019-12-06 23:33:57,911|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|pyfs has path /devito\\n2019-12-06 
23:33:57,911|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|Reverting working dir from /devito to /mnt/batch/tasks/shared/LS_root/jobs/ghiordanfwiws/azureml/020_azuremlestimator_1575674728_d40baeba/mounts/workspaceblobstore/azureml/020_AzureMLEstimator_1575674728_d40baeba\\n2019-12-06 23:33:57,911|azureml.history._tracking.PythonWorkingDirectory|INFO|Setting working dir to /mnt/batch/tasks/shared/LS_root/jobs/ghiordanfwiws/azureml/020_azuremlestimator_1575674728_d40baeba/mounts/workspaceblobstore/azureml/020_AzureMLEstimator_1575674728_d40baeba\\n2019-12-06 23:33:57,912|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|[STOP]\\n2019-12-06 23:33:57,912|azureml.WorkingDirectoryCM|DEBUG|[STOP]\\n2019-12-06 23:33:57,912|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba|INFO|complete is not setting status for submitted runs.\\n2019-12-06 23:33:57,912|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.FlushingMetricsClient|DEBUG|[START]\\n2019-12-06 23:33:57,912|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient|DEBUG|Overrides: Max batch size: 50, batch cushion: 5, Interval: 1.\\n2019-12-06 23:33:57,912|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.PostMetricsBatchDaemon|DEBUG|Starting daemon and triggering first instance\\n2019-12-06 23:33:57,912|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient|DEBUG|Used for use_batch=True.\\n2019-12-06 23:33:57,912|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|[START]\\n2019-12-06 23:33:57,912|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|flush timeout 300 is different from task queue timeout 120, using flush timeout\\n2019-12-06 23:33:57,913|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|Waiting 300 seconds on tasks: [].\\n2019-12-06 23:33:57,913|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch|DEBUG|\\n2019-12-06 23:33:57,913|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|[STOP]\\n2019-12-06 23:33:57,913|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.FlushingMetricsClient|DEBUG|[STOP]\\n2019-12-06 23:33:57,913|azureml.RunStatusContext|DEBUG|[STOP]\\n2019-12-06 23:33:57,913|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.FlushingMetricsClient|DEBUG|[START]\\n2019-12-06 23:33:57,913|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|[START]\\n2019-12-06 23:33:57,913|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|flush timeout 300.0 is different from task queue timeout 120, using flush timeout\\n2019-12-06 23:33:57,913|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|Waiting 300.0 
seconds on tasks: [].\\n2019-12-06 23:33:57,913|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch|DEBUG|\\n2019-12-06 23:33:57,913|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|[STOP]\\n2019-12-06 23:33:57,914|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.FlushingMetricsClient|DEBUG|[STOP]\\n2019-12-06 23:33:57,914|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.FlushingMetricsClient|DEBUG|[START]\\n2019-12-06 23:33:57,914|azureml.BatchTaskQueueAdd_1_Batches|DEBUG|[Start]\\n2019-12-06 23:33:57,914|azureml.BatchTaskQueueAdd_1_Batches.WorkerPool|DEBUG|submitting future: _handle_batch\\n2019-12-06 23:33:57,914|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch|DEBUG|Batch size 1.\\n2019-12-06 23:33:57,914|azureml.BatchTaskQueueAdd_1_Batches.0__handle_batch|DEBUG|Using basic handler - no exception handling\\n2019-12-06 23:33:57,914|azureml._restclient.clientbase.WorkerPool|DEBUG|submitting future: _log_batch\\n2019-12-06 23:33:57,914|azureml.BatchTaskQueueAdd_1_Batches|DEBUG|Adding task 0__handle_batch to queue of approximate size: 0\\n2019-12-06 23:33:57,915|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.0__log_batch|DEBUG|Using basic handler - no exception handling\\n2019-12-06 23:33:57,915|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.post_batch-async:False|DEBUG|[START]\\n2019-12-06 23:33:57,915|azureml.BatchTaskQueueAdd_1_Batches|DEBUG|[Stop] - waiting default timeout\\n2019-12-06 23:33:57,915|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch|DEBUG|Adding task 0__log_batch to queue of approximate size: 0\\n2019-12-06 23:33:57,916|msrest.service_client|DEBUG|Accept header absent and forced to application/json\\n2019-12-06 23:33:57,916|azureml.BatchTaskQueueAdd_1_Batches.WaitFlushSource:BatchTaskQueueAdd_1_Batches|DEBUG|[START]\\n2019-12-06 23:33:57,917|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-06 23:33:57,917|azureml.BatchTaskQueueAdd_1_Batches.WaitFlushSource:BatchTaskQueueAdd_1_Batches|DEBUG|Overriding default flush timeout from None to 120\\n2019-12-06 23:33:57,917|msrest.http_logger|DEBUG|Request URL: 'https://eastus2.experiments.azureml.net/history/v1.0/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourceGroups/ghiordanfwirsg01/providers/Microsoft.MachineLearningServices/workspaces/ghiordanfwiws/experiments/020_AzureMLEstimator/runs/020_AzureMLEstimator_1575674728_d40baeba/batch/metrics'\\n2019-12-06 23:33:57,917|azureml.BatchTaskQueueAdd_1_Batches.WaitFlushSource:BatchTaskQueueAdd_1_Batches|DEBUG|Waiting 120 seconds on tasks: [AsyncTask(0__handle_batch)].\\n2019-12-06 23:33:57,918|msrest.http_logger|DEBUG|Request method: 'POST'\\n2019-12-06 23:33:57,918|azureml.BatchTaskQueueAdd_1_Batches.0__handle_batch.WaitingTask|DEBUG|[START]\\n2019-12-06 23:33:57,918|msrest.http_logger|DEBUG|Request headers:\\n2019-12-06 23:33:57,918|azureml.BatchTaskQueueAdd_1_Batches.0__handle_batch.WaitingTask|DEBUG|Awaiter is BatchTaskQueueAdd_1_Batches\\n2019-12-06 23:33:57,918|msrest.http_logger|DEBUG| 'Accept': 'application/json'\\n2019-12-06 
23:33:57,918|azureml.BatchTaskQueueAdd_1_Batches.0__handle_batch.WaitingTask|DEBUG|[STOP]\\n2019-12-06 23:33:57,918|msrest.http_logger|DEBUG| 'Content-Type': 'application/json-patch+json; charset=utf-8'\\n2019-12-06 23:33:57,918|azureml.BatchTaskQueueAdd_1_Batches|DEBUG|\\n2019-12-06 23:33:57,918|msrest.http_logger|DEBUG| 'x-ms-client-request-id': '7318af30-3aa3-4d84-a4db-0595c67afd70'\\n2019-12-06 23:33:57,918|azureml.BatchTaskQueueAdd_1_Batches.WaitFlushSource:BatchTaskQueueAdd_1_Batches|DEBUG|[STOP]\\n2019-12-06 23:33:57,919|msrest.http_logger|DEBUG| 'request-id': '7318af30-3aa3-4d84-a4db-0595c67afd70'\\n2019-12-06 23:33:57,919|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|[START]\\n2019-12-06 23:33:57,919|msrest.http_logger|DEBUG| 'Content-Length': '410'\\n2019-12-06 23:33:57,919|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|flush timeout 300.0 is different from task queue timeout 120, using flush timeout\\n2019-12-06 23:33:57,919|msrest.http_logger|DEBUG| 'User-Agent': 'python/3.6.9 (Linux-4.15.0-1057-azure-x86_64-with-debian-10.0) msrest/0.6.10 azureml._restclient/core.1.0.76 sdk_run'\\n2019-12-06 23:33:57,919|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|Waiting 300.0 seconds on tasks: [AsyncTask(0__log_batch)].\\n2019-12-06 23:33:57,919|msrest.http_logger|DEBUG|Request body:\\n2019-12-06 23:33:57,919|msrest.http_logger|DEBUG|{\\\"values\\\": [{\\\"metricId\\\": \\\"d160ffa3-e1bc-4ff2-b60f-7742b38cdfd2\\\", \\\"metricType\\\": \\\"azureml.v1.scalar\\\", \\\"createdUtc\\\": \\\"2019-12-06T23:33:57.866688Z\\\", \\\"name\\\": \\\"training_message01: \\\", \\\"description\\\": \\\"\\\", \\\"numCells\\\": 1, \\\"cells\\\": [{\\\"training_message01: \\\": \\\"finished experiment\\\"}], \\\"schema\\\": {\\\"numProperties\\\": 1, \\\"properties\\\": [{\\\"propertyId\\\": \\\"training_message01: \\\", \\\"name\\\": \\\"training_message01: \\\", \\\"type\\\": \\\"string\\\"}]}}]}\\n2019-12-06 23:33:57,919|msrest.universal_http|DEBUG|Configuring redirects: allow=True, max=30\\n2019-12-06 23:33:57,920|msrest.universal_http|DEBUG|Configuring request: timeout=100, verify=True, cert=None\\n2019-12-06 23:33:57,920|msrest.universal_http|DEBUG|Configuring proxies: ''\\n2019-12-06 23:33:57,920|msrest.universal_http|DEBUG|Evaluate proxies against ENV settings: True\\n2019-12-06 23:33:58,044|msrest.http_logger|DEBUG|Response status: 200\\n2019-12-06 23:33:58,044|msrest.http_logger|DEBUG|Response headers:\\n2019-12-06 23:33:58,044|msrest.http_logger|DEBUG| 'Date': 'Fri, 06 Dec 2019 23:33:58 GMT'\\n2019-12-06 23:33:58,044|msrest.http_logger|DEBUG| 'Content-Length': '0'\\n2019-12-06 23:33:58,044|msrest.http_logger|DEBUG| 'Connection': 'keep-alive'\\n2019-12-06 23:33:58,044|msrest.http_logger|DEBUG| 'Request-Context': 'appId=cid-v1:2d2e8e63-272e-4b3c-8598-4ee570a0e70d'\\n2019-12-06 23:33:58,044|msrest.http_logger|DEBUG| 'x-ms-client-request-id': '7318af30-3aa3-4d84-a4db-0595c67afd70'\\n2019-12-06 23:33:58,044|msrest.http_logger|DEBUG| 'x-ms-client-session-id': ''\\n2019-12-06 23:33:58,045|msrest.http_logger|DEBUG| 'Strict-Transport-Security': 'max-age=15724800; includeSubDomains; preload'\\n2019-12-06 23:33:58,045|msrest.http_logger|DEBUG| 'X-Content-Type-Options': 'nosniff'\\n2019-12-06 
23:33:58,045|msrest.http_logger|DEBUG|Response content:\\n2019-12-06 23:33:58,045|msrest.http_logger|DEBUG|\\n2019-12-06 23:33:58,045|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.post_batch-async:False|DEBUG|[STOP]\\n2019-12-06 23:33:58,170|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.0__log_batch.WaitingTask|DEBUG|[START]\\n2019-12-06 23:33:58,170|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.0__log_batch.WaitingTask|DEBUG|Awaiter is PostMetricsBatch\\n2019-12-06 23:33:58,170|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.0__log_batch.WaitingTask|DEBUG|[STOP]\\n2019-12-06 23:33:58,170|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch|DEBUG|Waiting on task: 0__log_batch.\\n1 tasks left. Current duration of flush 0.0002143383026123047 seconds.\\n\\n2019-12-06 23:33:58,170|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|[STOP]\\n2019-12-06 23:33:58,170|azureml._SubmittedRun#020_AzureMLEstimator_1575674728_d40baeba.RunHistoryFacade.MetricsClient.FlushingMetricsClient|DEBUG|[STOP]\\n2019-12-06 23:33:58,170|azureml.SendRunKillSignal|DEBUG|[STOP]\\n2019-12-06 23:33:58,170|azureml.HistoryTrackingWorkerPool.WorkerPoolShutdown|DEBUG|[START]\\n2019-12-06 23:33:58,170|azureml.HistoryTrackingWorkerPool.WorkerPoolShutdown|DEBUG|[STOP]\\n2019-12-06 23:33:58,170|azureml.WorkerPool|DEBUG|[STOP]\\n\\nRun is completed.\", \"graph\": {}, \"widget_settings\": {\"childWidgetDisplay\": \"popup\", \"send_telemetry\": false, \"log_level\": \"NOTSET\", \"sdk_version\": \"1.0.76\"}, \"loading\": false}" + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# use a custom Docker image\n", + "from azureml.core.container_registry import ContainerRegistry\n", + "\n", + "image_name = docker_image_name\n", + "\n", + "# you can also point to an image in a private ACR\n", + "image_registry_details = ContainerRegistry()\n", + "image_registry_details.address = docker_repo_name\n", + "image_registry_details.username = os.getenv('ACR_USERNAME')\n", + "image_registry_details.password = os.getenv('ACR_PASSWORD') \n", + "\n", + "# don't let the system build a new conda environment\n", + "user_managed_dependencies = True\n", + "\n", + "# submit to a local Docker container. 
if you don't have a local Docker engine, set compute_target to a remote cluster instead (here, gpu_cluster).\n", + "script_params = {\n", + " '--output_folder': 'some_folder'\n", + "}\n", + "\n", + "\n", + "# distributed_training_conf = MpiConfiguration()\n", + "# distributed_training_conf.process_count_per_node = 2\n", + "\n", + "est = Estimator(source_directory=script_path, \n", + " compute_target=gpu_cluster, # or 'local' for a local Docker run\n", + " entry_script=azureml_training_script_file,\n", + " script_params=script_params,\n", + " use_docker=True,\n", + " custom_docker_image=image_name,\n", + " # image_registry_details (created above) lets the run pull the image from your private ACR\n", + " image_registry_details=image_registry_details, \n", + " user_managed=user_managed_dependencies,\n", + " distributed_training=None,\n", + " node_count=1\n", + " )\n", + "est.run_config.environment.python.interpreter_path = python_path_in_docker_image\n", + "\n", + "run = exp.submit(est)\n", + "RunDetails(run).show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "One can use the above link to the current experiment run in the Azure Portal to see tracked metrics, as well as the images and output notebooks that azureml_training_script_full_file_name saves under {run_dir}/outputs on the remote compute target; these are automatically uploaded by AzureML to the run history pages in the portal." + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'runId= 020_AzureMLEstimator_1575674728_d40baeba'" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'experimentation baseImage: fwi01_azureml:sdk.v1.0.76'" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "run_details = run.get_details()\n", + "\n", + "# print some details of the job run\n", + "'runId= {}'.format(run_details['runId'])\n", + "'experimentation baseImage: {}'.format(run_details['runDefinition']['environment']['docker']['baseImage'])" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Finished running 020_UseAzureMLEstimatorForExperimentation_GeophysicsTutorial_FWI_Azure_devito!\n" + ] + } + ], + "source": [ + "print('Finished running 020_UseAzureMLEstimatorForExperimentation_GeophysicsTutorial_FWI_Azure_devito!')" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/contrib/fwi/azureml_devito/notebooks/030_ScaleJobsUsingAzuremL_GeophysicsTutorial_FWI_Azure_devito.ipynb b/contrib/fwi/azureml_devito/notebooks/030_ScaleJobsUsingAzuremL_GeophysicsTutorial_FWI_Azure_devito.ipynb new file mode 100755 index 00000000..1b6ecaf4 --- /dev/null +++ b/contrib/fwi/azureml_devito/notebooks/030_ScaleJobsUsingAzuremL_GeophysicsTutorial_FWI_Azure_devito.ipynb @@ -0,0 +1,1369 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Copyright (c) Microsoft Corporation. \n", + "Licensed under the MIT License. 
\n", + " \n", + "\n", + "# FWI demo based on: \n", + "This project ports devito (https://github.com/opesci/devito) into Azure and runs tutorial notebooks at:\n", + "https://nbviewer.jupyter.org/github/opesci/devito/blob/master/examples/seismic/tutorials/\n", + "\n", + "\n", + "\n", + "In this notebook we run the devito demo [notebooks](https://nbviewer.jupyter.org/github/opesci/devito/blob/master/examples/seismic/tutorials/) mentioned above by using an [AzureML estimator](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.estimator.estimator?view=azure-ml-py) with custom docker image. The docker image and associated docker file were created in previous notebook.\n", + "\n", + "\n", + "#### This notebook is used as a control plane to submit experimentation jobs running devito in Azure in two modes (see [remote run azureml python script file invoking devito](#devito_demo_mode)):\n", + " - [Mode 1](#devito_demo_mode_1):\n", + " - uses custom code (slightly modified graphing functions save images to files too) \n", + " - experimentation job is defined by the devito code that is packaged as a py file to be run on an Azure remote compute target\n", + " - experimentation job can be used to track metrics or other artifacts (images)\n", + " \n", + " - Mode 2:\n", + " - papermill is invoked via cli or via its Python API to run unedited devito demo notebooks (https://github.com/opesci/devito/tree/master/examples/seismic/tutorials) on the remote compute target and get back the results as saved notebooks that are then Available in Azure portal. \n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# Allow multiple displays per cell\n", + "from IPython.core.interactiveshell import InteractiveShell\n", + "InteractiveShell.ast_node_interactivity = \"all\" " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import sys, os\n", + "import shutil\n", + "import urllib\n", + "import azureml.core\n", + "from azureml.core import Workspace, Experiment\n", + "from azureml.core.compute import ComputeTarget, AmlCompute\n", + "from azureml.core.compute_target import ComputeTargetException\n", + "from azureml.core.runconfig import MpiConfiguration\n", + "\n", + "\n", + "# from azureml.core.datastore import Datastore\n", + "# from azureml.data.data_reference import DataReference\n", + "# from azureml.pipeline.steps import HyperDriveStep\n", + "# from azureml.pipeline.core import Pipeline, PipelineData\n", + "# from azureml.train.dnn import TensorFlow\n", + "\n", + "from azureml.train.estimator import Estimator\n", + "from azureml.widgets import RunDetails\n", + "\n", + "import platform" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Azure ML SDK Version: 1.0.76\n" + ] + }, + { + "data": { + "text/plain": [ + "'Linux-4.15.0-1064-azure-x86_64-with-debian-stretch-sid'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'/datadrive01/prj/DeepSeismic/contrib/fwi/azureml_devito/notebooks'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "print(\"Azure ML SDK Version: \", azureml.core.VERSION)\n", + "platform.platform()\n", + "os.getcwd()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { 
+ "data": { + "text/plain": [ + "[None]" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'./../not_shared/general.env'" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def add_path_to_sys_path(path_to_append):\n", + " if not (any(path_to_append in paths for paths in sys.path)):\n", + " sys.path.append(path_to_append)\n", + " \n", + "auxiliary_files_dir = os.path.join(*(['.', 'src']))\n", + "paths_to_append = [os.path.join(os.getcwd(), auxiliary_files_dir)]\n", + "[add_path_to_sys_path(crt_path) for crt_path in paths_to_append]\n", + "\n", + "import project_utils\n", + "prj_consts = project_utils.project_consts()\n", + "\n", + "dotenv_file_path = os.path.join(*(prj_consts.DOTENV_FILE_PATH))\n", + "dotenv_file_path" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext dotenv" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'./../not_shared'" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "workspace_config_dir = os.path.join(*(prj_consts.AML_WORKSPACE_CONFIG_DIR))\n", + "workspace_config_file = prj_consts.AML_WORKSPACE_CONFIG_FILE_NAME\n", + "workspace_config_dir" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'./../temp/devito_tutorial/01_modelling.py'" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "'./../temp/devito_tutorial/azureml_01_modelling.py'" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "%dotenv $dotenv_file_path\n", + "\n", + "script_folder = prj_consts.AML_EXPERIMENT_DIR + ['devito_tutorial']\n", + "\n", + "devito_training_script_file = '01_modelling.py' # hardcoded in file azureml_training_script_full_file_name below\n", + "azureml_training_script_file = 'azureml_'+devito_training_script_file\n", + "experimentName = '020_AzureMLEstimator'\n", + "\n", + "os.makedirs(os.path.join(*(script_folder)), exist_ok=True)\n", + "script_path = os.path.join(*(script_folder))\n", + "training_script_full_file_name = os.path.join(script_path, devito_training_script_file)\n", + "azureml_training_script_full_file_name = os.path.join(script_path, azureml_training_script_file)\n", + "\n", + "training_script_full_file_name\n", + "azureml_training_script_full_file_name" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + " \n", + "##### devito in Azure ML demo mode 1\n", + "Create devito demo script based on \n", + "https://nbviewer.jupyter.org/github/opesci/devito/blob/master/examples/seismic/tutorials/01_modelling.ipynb\n", + "\n", + "[Back](#devito_in_AzureML_demoing_modes) to summary of modes od demoing devito in AzureML.\n", + "\n", + "Main purpose of this script is to extend _plot_velocity()_ and _plot_shotrecord()_ devito [plotting functions](https://github.com/opesci/devito/blob/master/examples/seismic/plotting.py) to allow the mto work in batch mode, i.e. save output to a file." 
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Overwriting ./../temp/devito_tutorial/01_modelling.py\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%writefile $training_script_full_file_name\n",
+ "\n",
+ "import numpy as np\n",
+ "import os, argparse\n",
+ "\n",
+ "from examples.seismic import Model\n",
+ "from examples.seismic import TimeAxis\n",
+ "from examples.seismic import Receiver\n",
+ "from devito import TimeFunction\n",
+ "from devito import Eq, solve\n",
+ "from devito import Operator\n",
+ "\n",
+ "\n",
+ "# try:\n",
+ "import matplotlib as mpl\n",
+ "import matplotlib.pyplot as plt\n",
+ "from matplotlib import cm\n",
+ "from mpl_toolkits.axes_grid1 import make_axes_locatable\n",
+ "\n",
+ "mpl.rc('font', size=16)\n",
+ "mpl.rc('figure', figsize=(8, 6))\n",
+ "# except:\n",
+ "#     plt = None\n",
+ "#     cm = None\n",
+ "    \n",
+ "\n",
+ "\n",
+ "# \"all\" plotting utils in devito do not save to file, so we extend them here\n",
+ "# https://github.com/opesci/devito/blob/master/examples/seismic/plotting.py\n",
+ "def plot_velocity(model, source=None, receiver=None, colorbar=True, file=None):\n",
+ "    \"\"\"\n",
+ "    Plot a two-dimensional velocity field from a seismic `Model`\n",
+ "    object. Optionally also includes point markers for sources and receivers.\n",
+ "\n",
+ "    Parameters\n",
+ "    ----------\n",
+ "    model : Model\n",
+ "        Object that holds the velocity model.\n",
+ "    source : array_like or float\n",
+ "        Coordinates of the source point.\n",
+ "    receiver : array_like or float\n",
+ "        Coordinates of the receiver points.\n",
+ "    colorbar : bool\n",
+ "        Option to plot the colorbar.\n",
+ "    file : str, optional\n",
+ "        If given, the plot is also saved to this file path (batch mode).\n",
+ "    \"\"\"\n",
+ "    domain_size = 1.e-3 * np.array(model.domain_size)\n",
+ "    extent = [model.origin[0], model.origin[0] + domain_size[0],\n",
+ "              model.origin[1] + domain_size[1], model.origin[1]]\n",
+ "\n",
+ "    plot = plt.imshow(np.transpose(model.vp.data), animated=True, cmap=cm.jet,\n",
+ "                      vmin=np.min(model.vp.data), vmax=np.max(model.vp.data),\n",
+ "                      extent=extent)\n",
+ "    plt.xlabel('X position (km)')\n",
+ "    plt.ylabel('Depth (km)')\n",
+ "\n",
+ "    # Plot receiver points, if provided\n",
+ "    if receiver is not None:\n",
+ "        plt.scatter(1e-3*receiver[:, 0], 1e-3*receiver[:, 1],\n",
+ "                    s=25, c='green', marker='D')\n",
+ "\n",
+ "    # Plot source points, if provided\n",
+ "    if source is not None:\n",
+ "        plt.scatter(1e-3*source[:, 0], 1e-3*source[:, 1],\n",
+ "                    s=25, c='red', marker='o')\n",
+ "\n",
+ "    # Ensure axis limits\n",
+ "    plt.xlim(model.origin[0], model.origin[0] + domain_size[0])\n",
+ "    plt.ylim(model.origin[1] + domain_size[1], model.origin[1])\n",
+ "\n",
+ "    # Create aligned colorbar on the right\n",
+ "    if colorbar:\n",
+ "        ax = plt.gca()\n",
+ "        divider = make_axes_locatable(ax)\n",
+ "        cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n",
+ "        cbar = plt.colorbar(plot, cax=cax)\n",
+ "        cbar.set_label('Velocity (km/s)')\n",
+ "    plt.show()\n",
+ "    \n",
+ "    if file is not None:\n",
+ "        plt.savefig(file)\n",
+ "        print('plotted image saved as {} file'.format(file))\n",
+ "    \n",
+ "    plt.clf()\n",
+ "\n",
+ "def plot_shotrecord(rec, model, t0, tn, colorbar=True, file=None):\n",
+ "    \"\"\"\n",
+ "    Plot a shot record (receiver values over time).\n",
+ "\n",
+ "    Parameters\n",
+ "    ----------\n",
+ "    rec :\n",
+ "        Receiver data with shape (time, points).\n",
+ "    model : Model\n",
+ "        Object that holds the velocity model.\n",
+ "    t0 : int\n",
+ "        Start of time dimension to plot.\n",
+ " tn : int\n", + " End of time dimension to plot.\n", + " \"\"\"\n", + " scale = np.max(rec) / 10.\n", + " extent = [model.origin[0], model.origin[0] + 1e-3*model.domain_size[0],\n", + " 1e-3*tn, t0]\n", + "\n", + " plot = plt.imshow(rec, vmin=-scale, vmax=scale, cmap=cm.gray, extent=extent)\n", + " plt.xlabel('X position (km)')\n", + " plt.ylabel('Time (s)')\n", + "\n", + " # Create aligned colorbar on the right\n", + " if colorbar:\n", + " ax = plt.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " plt.colorbar(plot, cax=cax)\n", + " plt.show() \n", + " \n", + " if file is not None:\n", + " plt.savefig(file)\n", + " print('plotted image saved as {} file'.format(file))\n", + " \n", + " plt.clf()\n", + "\n", + "def main(output_folder): \n", + " # 1. Define the physical problem\n", + " # The first step is to define the physical model:\n", + " # - physical dimensions of interest\n", + " # - velocity profile of this physical domain\n", + "\n", + " # Define a physical size\n", + " shape = (101, 101) # Number of grid point (nx, nz)\n", + " spacing = (10., 10.) # Grid spacing in m. The domain size is now 1km by 1km\n", + " origin = (0., 0.) # What is the location of the top left corner. This is necessary to define\n", + " # the absolute location of the source and receivers\n", + "\n", + " # Define a velocity profile. The velocity is in km/s\n", + " v = np.empty(shape, dtype=np.float32)\n", + " v[:, :51] = 1.5\n", + " v[:, 51:] = 2.5\n", + "\n", + " # With the velocity and model size defined, we can create the seismic model that\n", + " # encapsulates this properties. We also define the size of the absorbing layer as 10 grid points\n", + " model = Model(vp=v, origin=origin, shape=shape, spacing=spacing,\n", + " space_order=2, nbpml=10)\n", + "\n", + " plot_velocity(model, \n", + " file= os.path.join(*( [output_folder,'output000.png'])))\n", + " \n", + " # 2. Acquisition geometry\n", + " t0 = 0. # Simulation starts a t=0\n", + " tn = 1000. # Simulation last 1 second (1000 ms)\n", + " dt = model.critical_dt # Time step from model grid spacing\n", + "\n", + " time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", + " from examples.seismic import RickerSource\n", + "\n", + " f0 = 0.010 # Source peak frequency is 10Hz (0.010 kHz)\n", + " src = RickerSource(name='src', grid=model.grid, f0=f0,\n", + " npoint=1, time_range=time_range)\n", + "\n", + " # First, position source centrally in all dimensions, then set depth\n", + " src.coordinates.data[0, :] = np.array(model.domain_size) * .5\n", + " src.coordinates.data[0, -1] = 20. # Depth is 20m\n", + "\n", + " # We can plot the time signature to see the wavelet\n", + "# src.show()\n", + "\n", + " # Create symbol for 101 receivers\n", + " rec = Receiver(name='rec', grid=model.grid, npoint=101, time_range=time_range)\n", + "\n", + " # Prescribe even spacing for receivers along the x-axis\n", + " rec.coordinates.data[:, 0] = np.linspace(0, model.domain_size[0], num=101)\n", + " rec.coordinates.data[:, 1] = 20. 
+ "\n",
+ "    # We can now show the source and receivers within our domain:\n",
+ "    # Red dot: Source location\n",
+ "    # Green dots: Receiver locations (every 4th point)\n",
+ "    plot_velocity(model, source=src.coordinates.data,\n",
+ "                  receiver=rec.coordinates.data[::4, :], \n",
+ "                  file= os.path.join(*( [output_folder,'output010.png'])))\n",
+ "    \n",
+ "    # Define the wavefield with the size of the model and the time dimension\n",
+ "    u = TimeFunction(name=\"u\", grid=model.grid, time_order=2, space_order=2)\n",
+ "\n",
+ "    # We can now write the PDE\n",
+ "    pde = model.m * u.dt2 - u.laplace + model.damp * u.dt\n",
+ "\n",
+ "    # The PDE representation matches the continuous form on paper:\n",
+ "    #   m * d2u/dt2 - laplace(u) + damp * du/dt = 0\n",
+ "    pde\n",
+ "    \n",
+ "    # This discrete PDE can be solved in a time-marching way, updating u(t+dt) from the previous time step.\n",
+ "    # Devito has a shortcut for u(t+dt), which is u.forward. We can then rewrite the PDE as\n",
+ "    # a time-marching update equation known as a stencil, using customized SymPy functions\n",
+ "\n",
+ "    stencil = Eq(u.forward, solve(pde, u.forward))\n",
+ "    # Finally we define the source injection and receiver read functions to generate the corresponding code\n",
+ "    src_term = src.inject(field=u.forward, expr=src * dt**2 / model.m)\n",
+ "\n",
+ "    # Create interpolation expression for receivers\n",
+ "    rec_term = rec.interpolate(expr=u.forward)\n",
+ "\n",
+ "    op = Operator([stencil] + src_term + rec_term, subs=model.spacing_map)\n",
+ "    \n",
+ "    op(time=time_range.num-1, dt=model.critical_dt)\n",
+ "    plot_shotrecord(rec.data, model, t0, tn, \n",
+ "                    file= os.path.join(*( [output_folder,'output020.png'])))\n",
+ "\n",
+ "if __name__ == \"__main__\":\n",
+ "    parser = argparse.ArgumentParser()\n",
+ "    parser.add_argument('--output_folder', type=str, nargs='?', \\\n",
+ "                        dest='output_folder', help='output artifacts location',\\\n",
+ "                        default='.')\n",
+ "    args = parser.parse_args()\n",
+ "    \n",
+ "    main(args.output_folder)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "##### Get the experimentation docker image for devito"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'fwi01_azureml:sdk.v1.0.76'"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "'fwi01acr.azurecr.io/fwi01_azureml:sdk.v1.0.76'"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "docker_repo_name = os.getenv('ACR_NAME')+'.azurecr.io' # or os.getenv('DOCKER_LOGIN')\n",
+ "\n",
+ "docker_image_name = os.getenv('EXPERIMENTATION_DOCKER_IMAGE_NAME')\n",
+ "\n",
+ "image_version = os.getenv('EXPERIMENTATION_DOCKER_IMAGE_TAG')\n",
+ "if image_version!=\"\":\n",
+ "    docker_image_name = docker_image_name +':'+ image_version\n",
+ "\n",
+ "full_docker_image_name = docker_repo_name + '/' + docker_image_name\n",
+ "    \n",
+ "docker_image_name\n",
+ "full_docker_image_name"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Extract the Python path inside the custom docker image that corresponds to the desired conda environment. Without this, AzureML would try to create a separate conda environment."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'docker run -i --rm --name fwi01_azureml_container02 fwi01acr.azurecr.io/fwi01_azureml:sdk.v1.0.76 /bin/bash -c \"which python\" '"
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "'/opt/conda/envs/fwi01_conda_env/bin/python'"
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "get_Python_path_command='docker run -i --rm --name fwi01_azureml_container02 '+ \\\n",
+ "full_docker_image_name + \\\n",
+ "' /bin/bash -c \"which python\" '\n",
+ "get_Python_path_command\n",
+ "\n",
+ "\n",
+ "import subprocess\n",
+ "python_path_in_docker_image = subprocess.check_output(get_Python_path_command,shell=True,stderr=subprocess.STDOUT).\\\n",
+ "decode('utf-8').strip()\n",
+ "python_path_in_docker_image"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "#### Create the azureml_script_file that invokes:\n",
+ "  - the custom-edited devito training_script_file\n",
+ "  - unedited devito notebooks via papermill (invoked via the cli and via the papermill Python API)\n",
+ "\n",
+ "[Back](#devito_in_AzureML_demoing_modes) to the notebook summary."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Overwriting ./../temp/devito_tutorial/azureml_01_modelling.py\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%writefile $azureml_training_script_full_file_name\n",
+ "\n",
+ "import argparse\n",
+ "import os\n",
+ "os.system('conda env list')\n",
+ "\n",
+ "import azureml.core\n",
+ "from azureml.core.run import Run\n",
+ "\n",
+ "print(azureml.core.VERSION)\n",
+ "\n",
+ "parser = argparse.ArgumentParser()\n",
+ "parser.add_argument('--output_folder', type=str, dest='output_folder', help='output artifacts location')\n",
+ "\n",
+ "args = parser.parse_args()\n",
+ "print('args.output_folder is {} but it will be ignored since the AzureML-tracked ./outputs will be used'.format(args.output_folder))\n",
+ "\n",
+ "# get the Azure ML run object\n",
+ "run = Run.get_context()\n",
+ "\n",
+ "# ./outputs/ folder is autotracked so it should get uploaded at the end of the run\n",
+ "output_dir_AzureML_tracked = './outputs'\n",
+ "\n",
+ "crt_dir = os.getcwd()\n",
+ "\n",
+ "cli_command= \\\n",
+ "'cd /devito; /opt/conda/envs/fwi01_conda_env/bin/python '+ crt_dir +'/01_modelling.py' + \\\n",
+ "' --output_folder '+ crt_dir + output_dir_AzureML_tracked+ '/' + \\\n",
+ "' > '+ crt_dir + output_dir_AzureML_tracked + '/01_modelling.log' \n",
+ "# + \\\n",
+ "# ' 2>&1 ' + crt_dir +'/'+ output_dir_AzureML_tracked + '/devito_cli_py.log'\n",
+ "print('Running devito from cli on 01_modelling.py----BEGIN-----:') \n",
+ "print(cli_command); print('\\n');os.system(cli_command)\n",
+ "print('Running devito from cli on 01_modelling.py----END-----:\\n\\n')\n",
+ "\n",
+ "cli_command= \\\n",
+ "'cd /devito; papermill ' + \\\n",
+ "'./examples/seismic/tutorials/02_rtm.ipynb '+\\\n",
+ "crt_dir +'/outputs/02_rtm_output.ipynb ' + \\\n",
+ "'--log-output --no-progress-bar --kernel python3 ' + \\\n",
+ "' > '+ crt_dir + output_dir_AzureML_tracked + '/02_rtm_output.log' \n",
+ "# + \\\n",
+ "# ' 2>&1 ' + crt_dir +'/'+ output_dir_AzureML_tracked + '/papermill_cli.log'\n",
+ "\n",
+ "# FIXME - activate the right conda env for running papermill from the cli\n",
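+ "# (untested sketch of a possible fix: run the command through a bash shell that\n",
+ "#  activates the image's conda env first, e.g.\n",
+ "#  cli_command = '/bin/bash -c \"source activate fwi01_conda_env && cd /devito && papermill ...\"')\n",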
"activate_right_conda_env_fixed = False\n", + "if activate_right_conda_env_fixed:\n", + " print('Running papermill from cli on 02_rtm.ipynb----BEGIN-----:') \n", + " print(cli_command); print('\\n');os.system(cli_command)\n", + " print('Running papermill from cli on 02_rtm.ipynb----END-----:\\n\\n') \n", + "\n", + "\n", + "print('Running papermill from Python API on 03_fwi.ipynb----BEGIN-----:') \n", + "import papermill as pm\n", + "os.chdir('/devito')\n", + "pm.execute_notebook(\n", + " './examples/seismic/tutorials/03_fwi.ipynb',\n", + " crt_dir +'/outputs/03_fwi_output.ipynb'\n", + ")\n", + "print('Running papermill from Python API on 03_fwi.ipynb----END-----:') \n", + "\n", + "print('Running papermill from Python API on 04_dask.ipynb----BEGIN-----:') \n", + "import papermill as pm\n", + "os.chdir('/devito')\n", + "pm.execute_notebook(\n", + " './examples/seismic/tutorials/04_dask.ipynb',\n", + " crt_dir +'/outputs/04_dask_output.ipynb'\n", + ")\n", + "print('Running papermill from Python API on 04_dask.ipynb----END-----:') \n", + " \n", + "\n", + "os.system('pwd')\n", + "os.system('ls -l /')\n", + "os.system('ls -l ./')\n", + "os.system('ls -l ' +crt_dir + output_dir_AzureML_tracked)\n", + "run.log('training_message01: ', 'finished experiment')\n", + "print('\\n')" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['azureml_01_modelling.py', '01_modelling.py']" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "script_path=os.path.join(*(script_folder))\n", + "os.listdir(script_path)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Initialize workspace\n", + "\n", + "Initialize a workspace object from persisted configuration. If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure the config file is present at .\\config.json" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING - Warning: Falling back to use azure cli login credentials.\n", + "If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.\n", + "Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Workspace name: ghiordanfwiws\n", + "Azure region: eastus2\n", + "Subscription id: 7899\n" + ] + } + ], + "source": [ + "ws = Workspace.from_config(\n", + " path=os.path.join(os.getcwd(),\n", + " os.path.join(*([workspace_config_dir, '.azureml', workspace_config_file]))))\n", + "print('Workspace name: ' + ws.name, \n", + " 'Azure region: ' + ws.location, \n", + " 'Subscription id: ' + ws.subscription_id[0:4], sep = '\\n')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create an Azure ML experiment\n", + "Let's create an experiment named \"tf-mnist\" and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure." 
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "exp = Experiment(workspace=ws, name=experimentName)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Retrieve or create an Azure Machine Learning compute\n",
+ "Azure Machine Learning Compute is a service for provisioning and managing clusters of Azure virtual machines for running machine learning workloads. Let's create a new Azure Machine Learning Compute in the current workspace, if it doesn't already exist. We will then run the training script on this compute target.\n",
+ "\n",
+ "If we could not find the compute with the given name in the previous cell, we will create a new compute here. This process is broken down into the following steps:\n",
+ "\n",
+ "1. Create the configuration\n",
+ "2. Create the Azure Machine Learning compute\n",
+ "\n",
+ "**This process will take a few minutes and provides only sparse output along the way. Please make sure to wait until the call returns before moving on to the next cell.**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'gpuclstfwi07'"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "gpu_cluster_name = os.getenv('GPU_CLUSTER_NAME')\n",
+ "gpu_cluster_name = 'gpuclstfwi07'\n",
+ "gpu_cluster_name"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Found existing gpu cluster\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Check whether the cluster already exists\n",
+ "max_nodes_value = 2\n",
+ "try:\n",
+ "    gpu_cluster = ComputeTarget(workspace=ws, name=gpu_cluster_name)\n",
+ "    print(\"Found existing gpu cluster\")\n",
+ "except ComputeTargetException:\n",
+ "    print(\"Could not find ComputeTarget cluster!\")\n",
+ "    \n",
+ "#     # Create a new gpu cluster using the code below\n",
+ "#     # Specify the configuration for the new cluster\n",
+ "#     compute_config = AmlCompute.provisioning_configuration(vm_size=\"Standard_NC6\",\n",
+ "#                                                            min_nodes=0,\n",
+ "#                                                            max_nodes=max_nodes_value)\n",
+ "#     # Create the cluster with the specified name and configuration\n",
+ "#     gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, compute_config)\n",
+ "\n",
+ "#     # Wait for the cluster to complete, show the output log\n",
+ "#     gpu_cluster.wait_for_completion(show_output=True)\n",
+ "    \n",
+ "    \n",
+ "# for demo purposes, show how cluster properties can be altered post-creation\n",
+ "gpu_cluster.update(min_nodes=0, max_nodes=max_nodes_value, idle_seconds_before_scaledown=1200)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Create an Azure ML SDK estimator with a custom docker image "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "a0312dfcb82f419288e3c3c37c39b9dd",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "_UserRunWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': False, 'log_level': 'NOTSET',…"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/aml.mini.widget.v1": "…"
\"https://ml.azure.com/experiments/020_AzureMLEstimator/runs/020_AzureMLEstimator_1575678435_be18a2fc?wsid=/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourcegroups/ghiordanfwirsg01/workspaces/ghiordanfwiws\", \"run_id\": \"020_AzureMLEstimator_1575678435_be18a2fc\", \"run_properties\": {\"run_id\": \"020_AzureMLEstimator_1575678435_be18a2fc\", \"created_utc\": \"2019-12-07T00:27:18.102865Z\", \"properties\": {\"_azureml.ComputeTargetType\": \"amlcompute\", \"ContentSnapshotId\": \"a5071b2a-37a7-40da-8340-69cc894091cb\", \"azureml.git.repository_uri\": \"git@github.com:georgeAccnt-GH/DeepSeismic.git\", \"mlflow.source.git.repoURL\": \"git@github.com:georgeAccnt-GH/DeepSeismic.git\", \"azureml.git.branch\": \"staging\", \"mlflow.source.git.branch\": \"staging\", \"azureml.git.commit\": \"1d3cd3340f4063508b6f707d5fc2a35f5429a07f\", \"mlflow.source.git.commit\": \"1d3cd3340f4063508b6f707d5fc2a35f5429a07f\", \"azureml.git.dirty\": \"True\", \"ProcessInfoFile\": \"azureml-logs/process_info.json\", \"ProcessStatusFile\": \"azureml-logs/process_status.json\"}, \"tags\": {\"_aml_system_ComputeTargetStatus\": \"{\\\"AllocationState\\\":\\\"steady\\\",\\\"PreparingNodeCount\\\":1,\\\"RunningNodeCount\\\":1,\\\"CurrentNodeCount\\\":2}\"}, \"script_name\": null, \"arguments\": null, \"end_time_utc\": null, \"status\": \"Running\", \"log_files\": {\"azureml-logs/55_azureml-execution-tvmps_e010639b61f121ff1dbd780d646c8bd4bc6a423228429632e00c37ab5e150756_p.txt\": \"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575678435_be18a2fc/azureml-logs/55_azureml-execution-tvmps_e010639b61f121ff1dbd780d646c8bd4bc6a423228429632e00c37ab5e150756_p.txt?sv=2019-02-02&sr=b&sig=99MfEJ4IvLwXgM3jjLm4amfljnv7gOK3%2BQPb1GN%2BZKg%3D&st=2019-12-07T00%3A22%3A27Z&se=2019-12-07T08%3A32%3A27Z&sp=r\"}, \"log_groups\": [[\"azureml-logs/55_azureml-execution-tvmps_e010639b61f121ff1dbd780d646c8bd4bc6a423228429632e00c37ab5e150756_p.txt\"]], \"run_duration\": \"0:05:10\"}, \"child_runs\": [], \"children_metrics\": {}, \"run_metrics\": [], \"run_logs\": \"2019-12-07T00:31:04Z Starting output-watcher...\\nLogin Succeeded\\nsdk.v1.0.76: Pulling from fwi01_azureml\\n1ab2bdfe9778: Pulling fs layer\\ndd7d28bd8be5: Pulling fs layer\\naf998e3a361b: Pulling fs layer\\n8f61820757bf: Pulling fs layer\\n0eb461057035: Pulling fs layer\\n23276e49c76d: Pulling fs layer\\nc55ca301ea9f: Pulling fs layer\\n0eb461057035: Waiting\\n8f61820757bf: Waiting\\nc55ca301ea9f: Waiting\\n1ab2bdfe9778: Verifying Checksum\\n1ab2bdfe9778: Download complete\\naf998e3a361b: Verifying Checksum\\naf998e3a361b: Download complete\\n0eb461057035: Verifying Checksum\\n0eb461057035: Download complete\\ndd7d28bd8be5: Verifying Checksum\\ndd7d28bd8be5: Download complete\\n1ab2bdfe9778: Pull complete\\n8f61820757bf: Verifying Checksum\\n8f61820757bf: Download complete\\ndd7d28bd8be5: Pull complete\\nc55ca301ea9f: Verifying Checksum\\nc55ca301ea9f: Download complete\\n23276e49c76d: Verifying Checksum\\n23276e49c76d: Download complete\\naf998e3a361b: Pull complete\\n8f61820757bf: Pull complete\\n0eb461057035: Pull complete\\n23276e49c76d: Pull complete\\n\", \"graph\": {}, \"widget_settings\": {\"childWidgetDisplay\": \"popup\", \"send_telemetry\": false, \"log_level\": \"NOTSET\", \"sdk_version\": \"1.0.76\"}, \"loading\": false}" + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# use a custom Docker image\n", + "from azureml.core.container_registry import ContainerRegistry\n", + "\n", 
+ "image_name = docker_image_name\n", + "\n", + "# you can also point to an image in a private ACR\n", + "image_registry_details = ContainerRegistry()\n", + "image_registry_details.address = docker_repo_name\n", + "image_registry_details.username = os.getenv('ACR_USERNAME')\n", + "image_registry_details.password = os.getenv('ACR_PASSWORD') \n", + "\n", + "# don't let the system build a new conda environment\n", + "user_managed_dependencies = True\n", + "\n", + "# submit to a local Docker container. if you don't have Docker engine running locally, you can set compute_target to cpu_cluster.\n", + "script_params = {\n", + " '--output_folder': 'some_folder'\n", + "}\n", + "\n", + "\n", + "# distributed_training_conf = MpiConfiguration()\n", + "# distributed_training_conf.process_count_per_node = 2\n", + "\n", + "est = Estimator(source_directory=script_path, \n", + " compute_target=gpu_cluster,#'local', #gpu_cluster, \n", + " entry_script=azureml_training_script_file,\n", + " script_params=script_params,\n", + " use_docker=True,\n", + " custom_docker_image=image_name,\n", + " # uncomment below line to use your private ACR\n", + " image_registry_details=image_registry_details, \n", + " user_managed=user_managed_dependencies,\n", + " distributed_training=None,\n", + " node_count=1\n", + " )\n", + "est.run_config.environment.python.interpreter_path = python_path_in_docker_image\n", + "\n", + "run = exp.submit(est)\n", + "RunDetails(run).show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "One can use the above link to currrent experiment run in Azure Portal to see tracked metrics, and images and output notebooks saved by azureml_training_script_full_file_name in {run_dir}/outputs on the remote compute target that are automatically saved by AzureML in the run history Azure portal pages." 
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "response = run.wait_for_completion(show_output=False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Final print 9, time 20.798 seconds: Counter({'Completed': 1})\r"
+ ]
+ }
+ ],
+ "source": [
+ "import time\n",
+ "from collections import Counter\n",
+ "# wait till all jobs finish\n",
+ "\n",
+ "def wait_for_run_list_to_finish(the_run_list):\n",
+ "    finished_status_list = ['Completed', 'Failed']\n",
+ "    printing_counter = 0\n",
+ "    start_time = time.time()\n",
+ "    while (not all((crt_queried_job.get_status() in finished_status_list) for crt_queried_job in the_run_list)):\n",
+ "        time.sleep(2)\n",
+ "        printing_counter += 1\n",
+ "        print('print {0:.0f}, time {1:.3f} seconds: {2}'.format(printing_counter, time.time() - start_time, \n",
+ "                                                                str(Counter([crt_queried_job.get_status() for crt_queried_job in the_run_list]))), end=\"\\r\")\n",
+ "    # final status\n",
+ "    print('Final print {0:.0f}, time {1:.3f} seconds: {2}'.format(printing_counter, time.time() - start_time, \n",
+ "                                                                  str(Counter([crt_queried_job.get_status() for crt_queried_job in the_run_list]))), end=\"\\r\")    \n",
+ "wait_for_run_list_to_finish([run])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "run_duration in seconds 243.960763\n",
+ "run_duration= 4m 3.961s\n"
+ ]
+ }
+ ],
+ "source": [
+ "import datetime, math\n",
+ "def get_run_duration(azureml_exp_run):\n",
+ "    run_details = azureml_exp_run.get_details()\n",
+ "    run_duration = datetime.datetime.strptime(run_details['endTimeUtc'], \"%Y-%m-%dT%H:%M:%S.%fZ\") - \\\n",
+ "                   datetime.datetime.strptime(run_details['startTimeUtc'], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n",
+ "    return run_duration.total_seconds()\n",
+ "run_duration = get_run_duration(run)\n",
+ "\n",
+ "run_seconds, run_minutes = math.modf(run_duration/60)\n",
+ "print('run_duration in seconds {}'.format(run_duration))\n",
+ "print('run_duration= {0:.0f}m {1:.3f}s'.format(run_minutes, run_seconds*60))\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Showing details for run 498\n"
+ ]
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "cd44e7b0a1c447dabe98bf114f420d76",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "_UserRunWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': False, 'log_level': 'NOTSET',…"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/aml.mini.widget.v1": "…"
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
\"mlflow.source.git.repoURL\": \"git@github.com:georgeAccnt-GH/DeepSeismic.git\", \"azureml.git.branch\": \"staging\", \"mlflow.source.git.branch\": \"staging\", \"azureml.git.commit\": \"1d3cd3340f4063508b6f707d5fc2a35f5429a07f\", \"mlflow.source.git.commit\": \"1d3cd3340f4063508b6f707d5fc2a35f5429a07f\", \"azureml.git.dirty\": \"True\", \"ProcessInfoFile\": \"azureml-logs/process_info.json\", \"ProcessStatusFile\": \"azureml-logs/process_status.json\"}, \"tags\": {}, \"script_name\": null, \"arguments\": null, \"end_time_utc\": \"2019-12-07T01:56:48.811115Z\", \"status\": \"Completed\", \"log_files\": {\"azureml-logs/55_azureml-execution-tvmps_01b47c06fd150418ce69a91b330cb6996c9e9e076f7368a183a2f9a708f17ccb_p.txt\": \"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575683693_ddd16e31/azureml-logs/55_azureml-execution-tvmps_01b47c06fd150418ce69a91b330cb6996c9e9e076f7368a183a2f9a708f17ccb_p.txt?sv=2019-02-02&sr=b&sig=9mQARzuRlCW%2F%2Brv3FDzJvm%2Fsaudk6GFjNypMRkV3O8g%3D&st=2019-12-07T01%3A46%3A50Z&se=2019-12-07T09%3A56%3A50Z&sp=r\", \"azureml-logs/65_job_prep-tvmps_01b47c06fd150418ce69a91b330cb6996c9e9e076f7368a183a2f9a708f17ccb_p.txt\": \"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575683693_ddd16e31/azureml-logs/65_job_prep-tvmps_01b47c06fd150418ce69a91b330cb6996c9e9e076f7368a183a2f9a708f17ccb_p.txt?sv=2019-02-02&sr=b&sig=TMxrg26ywABOyJtGYT3KVLrGP0TYIHQ9E3ePlr%2BQepg%3D&st=2019-12-07T01%3A46%3A50Z&se=2019-12-07T09%3A56%3A50Z&sp=r\", \"azureml-logs/70_driver_log.txt\": \"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575683693_ddd16e31/azureml-logs/70_driver_log.txt?sv=2019-02-02&sr=b&sig=vWkErsH55%2BLhIG%2FBJbtZb8NSNHFyNAzxk5VjW4p6lcM%3D&st=2019-12-07T01%3A46%3A50Z&se=2019-12-07T09%3A56%3A50Z&sp=r\", \"azureml-logs/75_job_post-tvmps_01b47c06fd150418ce69a91b330cb6996c9e9e076f7368a183a2f9a708f17ccb_p.txt\": \"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575683693_ddd16e31/azureml-logs/75_job_post-tvmps_01b47c06fd150418ce69a91b330cb6996c9e9e076f7368a183a2f9a708f17ccb_p.txt?sv=2019-02-02&sr=b&sig=cbDgvPNn4LNXDsUXZwmWCjRMj0O9PnFSqSCtuCPMTFo%3D&st=2019-12-07T01%3A46%3A50Z&se=2019-12-07T09%3A56%3A50Z&sp=r\", \"azureml-logs/process_info.json\": \"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575683693_ddd16e31/azureml-logs/process_info.json?sv=2019-02-02&sr=b&sig=wvqhR%2Bnzw0uLEsCGETAxkKrdwN5eI%2FgvTeB4juQ4aUI%3D&st=2019-12-07T01%3A46%3A50Z&se=2019-12-07T09%3A56%3A50Z&sp=r\", \"azureml-logs/process_status.json\": \"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575683693_ddd16e31/azureml-logs/process_status.json?sv=2019-02-02&sr=b&sig=kkirWrsrpjcrKndUUPxuJVeRWu0GthsVZ4cXpxbEGMg%3D&st=2019-12-07T01%3A46%3A50Z&se=2019-12-07T09%3A56%3A50Z&sp=r\", \"logs/azureml/728_azureml.log\": \"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575683693_ddd16e31/logs/azureml/728_azureml.log?sv=2019-02-02&sr=b&sig=pK%2F6TBBvQEPexjuRPR1FyOq6CUPXfnNBobkTmpmaeiM%3D&st=2019-12-07T01%3A46%3A50Z&se=2019-12-07T09%3A56%3A50Z&sp=r\", \"logs/azureml/azureml.log\": 
\"https://ghiordanstoragee145cef0b.blob.core.windows.net/azureml/ExperimentRun/dcid.020_AzureMLEstimator_1575683693_ddd16e31/logs/azureml/azureml.log?sv=2019-02-02&sr=b&sig=o%2BPcdcJvKZyQWRA0HpaJbM%2BxhqFOkdDjgBqtxtHtoag%3D&st=2019-12-07T01%3A46%3A50Z&se=2019-12-07T09%3A56%3A50Z&sp=r\"}, \"log_groups\": [[\"azureml-logs/process_info.json\", \"azureml-logs/process_status.json\", \"logs/azureml/azureml.log\"], [\"azureml-logs/55_azureml-execution-tvmps_01b47c06fd150418ce69a91b330cb6996c9e9e076f7368a183a2f9a708f17ccb_p.txt\"], [\"azureml-logs/65_job_prep-tvmps_01b47c06fd150418ce69a91b330cb6996c9e9e076f7368a183a2f9a708f17ccb_p.txt\"], [\"azureml-logs/70_driver_log.txt\"], [\"azureml-logs/75_job_post-tvmps_01b47c06fd150418ce69a91b330cb6996c9e9e076f7368a183a2f9a708f17ccb_p.txt\"], [\"logs/azureml/728_azureml.log\"]], \"run_duration\": \"0:01:53\"}, \"child_runs\": [], \"children_metrics\": {}, \"run_metrics\": [{\"name\": \"training_message01: \", \"run_id\": \"020_AzureMLEstimator_1575683693_ddd16e31\", \"categories\": [0], \"series\": [{\"data\": [\"finished experiment\"]}]}], \"run_logs\": \"2019-12-07 01:55:16,975|azureml|DEBUG|Inputs:: kwargs: {'OutputCollection': True, 'snapshotProject': True, 'only_in_process_features': True, 'skip_track_logs_dir': True}, track_folders: None, deny_list: None, directories_to_watch: []\\n2019-12-07 01:55:16,976|azureml.history._tracking.PythonWorkingDirectory|DEBUG|Execution target type: batchai\\n2019-12-07 01:55:16,976|azureml.history._tracking.PythonWorkingDirectory|DEBUG|Failed to import pyspark with error: No module named 'pyspark'\\n2019-12-07 01:55:16,976|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|Pinning working directory for filesystems: ['pyfs']\\n2019-12-07 01:55:17,242|azureml._base_sdk_common.user_agent|DEBUG|Fetching client info from /root/.azureml/clientinfo.json\\n2019-12-07 01:55:17,243|azureml._base_sdk_common.user_agent|DEBUG|Error loading client info: [Errno 2] No such file or directory: '/root/.azureml/clientinfo.json'\\n2019-12-07 01:55:17,566|azureml.core._experiment_method|DEBUG|Trying to register submit_function search, on method \\n2019-12-07 01:55:17,566|azureml.core._experiment_method|DEBUG|Registered submit_function search, on method \\n2019-12-07 01:55:17,566|azureml.core._experiment_method|DEBUG|Trying to register submit_function search, on method \\n2019-12-07 01:55:17,566|azureml.core._experiment_method|DEBUG|Registered submit_function search, on method \\n2019-12-07 01:55:17,566|azureml.core.run|DEBUG|Adding new factory for run source hyperdrive\\n2019-12-07 01:55:18,070|azureml.core.run|DEBUG|Adding new factory for run source azureml.PipelineRun\\n2019-12-07 01:55:18,075|azureml.core.run|DEBUG|Adding new factory for run source azureml.ReusedStepRun\\n2019-12-07 01:55:18,078|azureml.core.run|DEBUG|Adding new factory for run source azureml.StepRun\\n2019-12-07 01:55:18,082|azureml.core.run|DEBUG|Adding new factory for run source azureml.scriptrun\\n2019-12-07 01:55:18,083|azureml.core.authentication.TokenRefresherDaemon|DEBUG|Starting daemon and triggering first instance\\n2019-12-07 01:55:18,088|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-07 01:55:18,089|azureml._restclient.clientbase|INFO|Created a worker pool for first use\\n2019-12-07 01:55:18,089|azureml.core.authentication|DEBUG|Time to expire 1814376.910384 seconds\\n2019-12-07 01:55:18,089|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in 
environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:18,089|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:18,089|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:18,089|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:18,090|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:18,090|azureml._base_sdk_common.service_discovery|DEBUG|Constructing mms service url in from history url environment variable None, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:18,090|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:18,090|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:18,090|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:18,118|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:18,122|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-07 01:55:18,128|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-07 01:55:18,132|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-07 01:55:18,136|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-07 01:55:18,141|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-07 01:55:18,141|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.RunClient.get-async:False|DEBUG|[START]\\n2019-12-07 01:55:18,142|msrest.service_client|DEBUG|Accept header absent and forced to application/json\\n2019-12-07 01:55:18,142|msrest.http_logger|DEBUG|Request URL: 'https://eastus2.experiments.azureml.net/history/v1.0/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourceGroups/ghiordanfwirsg01/providers/Microsoft.MachineLearningServices/workspaces/ghiordanfwiws/experiments/020_AzureMLEstimator/runs/020_AzureMLEstimator_1575683693_ddd16e31'\\n2019-12-07 01:55:18,142|msrest.http_logger|DEBUG|Request method: 'GET'\\n2019-12-07 01:55:18,142|msrest.http_logger|DEBUG|Request headers:\\n2019-12-07 01:55:18,142|msrest.http_logger|DEBUG| 'Accept': 'application/json'\\n2019-12-07 
01:55:18,142|msrest.http_logger|DEBUG| 'Content-Type': 'application/json; charset=utf-8'\\n2019-12-07 01:55:18,142|msrest.http_logger|DEBUG| 'x-ms-client-request-id': '066d53de-da2b-470f-936a-ed66dab2d28c'\\n2019-12-07 01:55:18,142|msrest.http_logger|DEBUG| 'request-id': '066d53de-da2b-470f-936a-ed66dab2d28c'\\n2019-12-07 01:55:18,143|msrest.http_logger|DEBUG| 'User-Agent': 'python/3.6.9 (Linux-4.15.0-1057-azure-x86_64-with-debian-10.0) msrest/0.6.10 azureml._restclient/core.1.0.76'\\n2019-12-07 01:55:18,143|msrest.http_logger|DEBUG|Request body:\\n2019-12-07 01:55:18,143|msrest.http_logger|DEBUG|None\\n2019-12-07 01:55:18,143|msrest.universal_http|DEBUG|Configuring redirects: allow=True, max=30\\n2019-12-07 01:55:18,143|msrest.universal_http|DEBUG|Configuring request: timeout=100, verify=True, cert=None\\n2019-12-07 01:55:18,143|msrest.universal_http|DEBUG|Configuring proxies: ''\\n2019-12-07 01:55:18,143|msrest.universal_http|DEBUG|Evaluate proxies against ENV settings: True\\n2019-12-07 01:55:18,196|msrest.http_logger|DEBUG|Response status: 200\\n2019-12-07 01:55:18,196|msrest.http_logger|DEBUG|Response headers:\\n2019-12-07 01:55:18,196|msrest.http_logger|DEBUG| 'Date': 'Sat, 07 Dec 2019 01:55:18 GMT'\\n2019-12-07 01:55:18,196|msrest.http_logger|DEBUG| 'Content-Type': 'application/json; charset=utf-8'\\n2019-12-07 01:55:18,196|msrest.http_logger|DEBUG| 'Transfer-Encoding': 'chunked'\\n2019-12-07 01:55:18,197|msrest.http_logger|DEBUG| 'Connection': 'keep-alive'\\n2019-12-07 01:55:18,197|msrest.http_logger|DEBUG| 'Vary': 'Accept-Encoding'\\n2019-12-07 01:55:18,197|msrest.http_logger|DEBUG| 'Request-Context': 'appId=cid-v1:2d2e8e63-272e-4b3c-8598-4ee570a0e70d'\\n2019-12-07 01:55:18,197|msrest.http_logger|DEBUG| 'x-ms-client-request-id': '066d53de-da2b-470f-936a-ed66dab2d28c'\\n2019-12-07 01:55:18,197|msrest.http_logger|DEBUG| 'x-ms-client-session-id': ''\\n2019-12-07 01:55:18,197|msrest.http_logger|DEBUG| 'Strict-Transport-Security': 'max-age=15724800; includeSubDomains; preload'\\n2019-12-07 01:55:18,197|msrest.http_logger|DEBUG| 'X-Content-Type-Options': 'nosniff'\\n2019-12-07 01:55:18,197|msrest.http_logger|DEBUG| 'Content-Encoding': 'gzip'\\n2019-12-07 01:55:18,197|msrest.http_logger|DEBUG|Response content:\\n2019-12-07 01:55:18,197|msrest.http_logger|DEBUG|{\\n \\\"runNumber\\\": 2107,\\n \\\"rootRunId\\\": \\\"020_AzureMLEstimator_1575683693_ddd16e31\\\",\\n \\\"experimentId\\\": \\\"8d96276b-f420-4a67-86be-f933dd3d38cd\\\",\\n \\\"createdUtc\\\": \\\"2019-12-07T01:54:55.3303306+00:00\\\",\\n \\\"createdBy\\\": {\\n \\\"userObjectId\\\": \\\"b77869a0-66f2-4288-89ef-13c10accc4dc\\\",\\n \\\"userPuId\\\": \\\"1003000090A95868\\\",\\n \\\"userIdp\\\": null,\\n \\\"userAltSecId\\\": null,\\n \\\"userIss\\\": \\\"https://sts.windows.net/72f988bf-86f1-41af-91ab-2d7cd011db47/\\\",\\n \\\"userTenantId\\\": \\\"72f988bf-86f1-41af-91ab-2d7cd011db47\\\",\\n \\\"userName\\\": \\\"George Iordanescu\\\"\\n },\\n \\\"userId\\\": \\\"b77869a0-66f2-4288-89ef-13c10accc4dc\\\",\\n \\\"token\\\": null,\\n \\\"tokenExpiryTimeUtc\\\": null,\\n \\\"error\\\": null,\\n \\\"warnings\\\": null,\\n \\\"revision\\\": 7,\\n \\\"runId\\\": \\\"020_AzureMLEstimator_1575683693_ddd16e31\\\",\\n \\\"parentRunId\\\": null,\\n \\\"status\\\": \\\"Running\\\",\\n \\\"startTimeUtc\\\": \\\"2019-12-07T01:55:07.6378716+00:00\\\",\\n \\\"endTimeUtc\\\": null,\\n \\\"heartbeatEnabled\\\": false,\\n \\\"options\\\": {\\n \\\"generateDataContainerIdIfNotSpecified\\\": true\\n },\\n \\\"name\\\": null,\\n 
\\\"dataContainerId\\\": \\\"dcid.020_AzureMLEstimator_1575683693_ddd16e31\\\",\\n \\\"description\\\": null,\\n \\\"hidden\\\": false,\\n \\\"runType\\\": \\\"azureml.scriptrun\\\",\\n \\\"properties\\\": {\\n \\\"_azureml.ComputeTargetType\\\": \\\"amlcompute\\\",\\n \\\"ContentSnapshotId\\\": \\\"a5071b2a-37a7-40da-8340-69cc894091cb\\\",\\n \\\"azureml.git.repository_uri\\\": \\\"git@github.com:georgeAccnt-GH/DeepSeismic.git\\\",\\n \\\"mlflow.source.git.repoURL\\\": \\\"git@github.com:georgeAccnt-GH/DeepSeismic.git\\\",\\n \\\"azureml.git.branch\\\": \\\"staging\\\",\\n \\\"mlflow.source.git.branch\\\": \\\"staging\\\",\\n \\\"azureml.git.commit\\\": \\\"1d3cd3340f4063508b6f707d5fc2a35f5429a07f\\\",\\n \\\"mlflow.source.git.commit\\\": \\\"1d3cd3340f4063508b6f707d5fc2a35f5429a07f\\\",\\n \\\"azureml.git.dirty\\\": \\\"True\\\",\\n \\\"ProcessInfoFile\\\": \\\"azureml-logs/process_info.json\\\",\\n \\\"ProcessStatusFile\\\": \\\"azureml-logs/process_status.json\\\"\\n },\\n \\\"scriptName\\\": \\\"azureml_01_modelling.py\\\",\\n \\\"target\\\": \\\"gpuclstfwi08\\\",\\n \\\"tags\\\": {},\\n \\\"inputDatasets\\\": [],\\n \\\"runDefinition\\\": null,\\n \\\"createdFrom\\\": {\\n \\\"type\\\": \\\"Notebook\\\",\\n \\\"locationType\\\": \\\"ArtifactId\\\",\\n \\\"location\\\": \\\"LocalUpload/020_AzureMLEstimator_1575683693_ddd16e31/030_ScaleJobsUsingAzuremL_GeophysicsTutorial_FWI_Azure_devito.ipynb\\\"\\n },\\n \\\"cancelUri\\\": \\\"https://eastus2.experiments.azureml.net/execution/v1.0/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourceGroups/ghiordanfwirsg01/providers/Microsoft.MachineLearningServices/workspaces/ghiordanfwiws/experiments/020_AzureMLEstimator/runId/020_AzureMLEstimator_1575683693_ddd16e31/cancel\\\",\\n \\\"completeUri\\\": null,\\n \\\"diagnosticsUri\\\": \\\"https://eastus2.experiments.azureml.net/execution/v1.0/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourceGroups/ghiordanfwirsg01/providers/Microsoft.MachineLearningServices/workspaces/ghiordanfwiws/experiments/020_AzureMLEstimator/runId/020_AzureMLEstimator_1575683693_ddd16e31/diagnostics\\\",\\n \\\"computeRequest\\\": {\\n \\\"nodeCount\\\": 1\\n },\\n \\\"retainForLifetimeOfWorkspace\\\": false\\n}\\n2019-12-07 01:55:18,202|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.RunClient.get-async:False|DEBUG|[STOP]\\n2019-12-07 01:55:18,202|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31|DEBUG|Constructing run from dto. type: azureml.scriptrun, source: None, props: {'_azureml.ComputeTargetType': 'amlcompute', 'ContentSnapshotId': 'a5071b2a-37a7-40da-8340-69cc894091cb', 'azureml.git.repository_uri': 'git@github.com:georgeAccnt-GH/DeepSeismic.git', 'mlflow.source.git.repoURL': 'git@github.com:georgeAccnt-GH/DeepSeismic.git', 'azureml.git.branch': 'staging', 'mlflow.source.git.branch': 'staging', 'azureml.git.commit': '1d3cd3340f4063508b6f707d5fc2a35f5429a07f', 'mlflow.source.git.commit': '1d3cd3340f4063508b6f707d5fc2a35f5429a07f', 'azureml.git.dirty': 'True', 'ProcessInfoFile': 'azureml-logs/process_info.json', 'ProcessStatusFile': 'azureml-logs/process_status.json'}\\n2019-12-07 01:55:18,202|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunContextManager|DEBUG|Valid logs dir, setting up content loader\\n2019-12-07 01:55:18,202|azureml|WARNING|Could not import azureml.mlflow or azureml.contrib.mlflow mlflow APIs will not run against AzureML services. 
Add azureml-mlflow as a conda dependency for the run if this behavior is desired\\n2019-12-07 01:55:18,203|azureml.WorkerPool|DEBUG|[START]\\n2019-12-07 01:55:18,203|azureml.SendRunKillSignal|DEBUG|[START]\\n2019-12-07 01:55:18,203|azureml.RunStatusContext|DEBUG|[START]\\n2019-12-07 01:55:18,203|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunContextManager.RunStatusContext|DEBUG|[START]\\n2019-12-07 01:55:18,203|azureml.WorkingDirectoryCM|DEBUG|[START]\\n2019-12-07 01:55:18,203|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|[START]\\n2019-12-07 01:55:18,203|azureml.history._tracking.PythonWorkingDirectory|INFO|Current working dir: /mnt/batch/tasks/shared/LS_root/jobs/ghiordanfwiws/azureml/020_azuremlestimator_1575683693_ddd16e31/mounts/workspaceblobstore/azureml/020_AzureMLEstimator_1575683693_ddd16e31\\n2019-12-07 01:55:18,203|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|Calling pyfs\\n2019-12-07 01:55:18,203|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|Storing working dir for pyfs as /mnt/batch/tasks/shared/LS_root/jobs/ghiordanfwiws/azureml/020_azuremlestimator_1575683693_ddd16e31/mounts/workspaceblobstore/azureml/020_AzureMLEstimator_1575683693_ddd16e31\\n2019-12-07 01:55:20,151|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:20,151|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:20,151|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:20,151|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:20,152|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:20,152|azureml._base_sdk_common.service_discovery|DEBUG|Constructing mms service url in from history url environment variable None, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:20,152|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:20,152|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:20,152|azureml._base_sdk_common.service_discovery|DEBUG|Found history service url in environment variable AZUREML_SERVICE_ENDPOINT, history service url: https://eastus2.experiments.azureml.net.\\n2019-12-07 01:55:20,157|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-07 01:55:20,158|azureml._run_impl.run_history_facade|DEBUG|Created a static thread pool for RunHistoryFacade class\\n2019-12-07 01:55:20,162|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, 
max_backoff=90\\n2019-12-07 01:55:20,166|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-07 01:55:20,170|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-07 01:55:20,175|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-07 01:55:20,175|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.RunClient.get-async:False|DEBUG|[START]\\n2019-12-07 01:55:20,175|msrest.service_client|DEBUG|Accept header absent and forced to application/json\\n2019-12-07 01:55:20,175|msrest.http_logger|DEBUG|Request URL: 'https://eastus2.experiments.azureml.net/history/v1.0/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourceGroups/ghiordanfwirsg01/providers/Microsoft.MachineLearningServices/workspaces/ghiordanfwiws/experiments/020_AzureMLEstimator/runs/020_AzureMLEstimator_1575683693_ddd16e31'\\n2019-12-07 01:55:20,175|msrest.http_logger|DEBUG|Request method: 'GET'\\n2019-12-07 01:55:20,175|msrest.http_logger|DEBUG|Request headers:\\n2019-12-07 01:55:20,176|msrest.http_logger|DEBUG| 'Accept': 'application/json'\\n2019-12-07 01:55:20,176|msrest.http_logger|DEBUG| 'Content-Type': 'application/json; charset=utf-8'\\n2019-12-07 01:55:20,176|msrest.http_logger|DEBUG| 'x-ms-client-request-id': 'b087e081-4f44-4f48-8adf-8c816a59faae'\\n2019-12-07 01:55:20,176|msrest.http_logger|DEBUG| 'request-id': 'b087e081-4f44-4f48-8adf-8c816a59faae'\\n2019-12-07 01:55:20,176|msrest.http_logger|DEBUG| 'User-Agent': 'python/3.6.9 (Linux-4.15.0-1057-azure-x86_64-with-debian-10.0) msrest/0.6.10 azureml._restclient/core.1.0.76'\\n2019-12-07 01:55:20,176|msrest.http_logger|DEBUG|Request body:\\n2019-12-07 01:55:20,176|msrest.http_logger|DEBUG|None\\n2019-12-07 01:55:20,176|msrest.universal_http|DEBUG|Configuring redirects: allow=True, max=30\\n2019-12-07 01:55:20,176|msrest.universal_http|DEBUG|Configuring request: timeout=100, verify=True, cert=None\\n2019-12-07 01:55:20,176|msrest.universal_http|DEBUG|Configuring proxies: ''\\n2019-12-07 01:55:20,176|msrest.universal_http|DEBUG|Evaluate proxies against ENV settings: True\\n2019-12-07 01:55:20,259|msrest.http_logger|DEBUG|Response status: 200\\n2019-12-07 01:55:20,259|msrest.http_logger|DEBUG|Response headers:\\n2019-12-07 01:55:20,259|msrest.http_logger|DEBUG| 'Date': 'Sat, 07 Dec 2019 01:55:20 GMT'\\n2019-12-07 01:55:20,259|msrest.http_logger|DEBUG| 'Content-Type': 'application/json; charset=utf-8'\\n2019-12-07 01:55:20,260|msrest.http_logger|DEBUG| 'Transfer-Encoding': 'chunked'\\n2019-12-07 01:55:20,260|msrest.http_logger|DEBUG| 'Connection': 'keep-alive'\\n2019-12-07 01:55:20,260|msrest.http_logger|DEBUG| 'Vary': 'Accept-Encoding'\\n2019-12-07 01:55:20,260|msrest.http_logger|DEBUG| 'Request-Context': 'appId=cid-v1:2d2e8e63-272e-4b3c-8598-4ee570a0e70d'\\n2019-12-07 01:55:20,260|msrest.http_logger|DEBUG| 'x-ms-client-request-id': 'b087e081-4f44-4f48-8adf-8c816a59faae'\\n2019-12-07 01:55:20,260|msrest.http_logger|DEBUG| 'x-ms-client-session-id': ''\\n2019-12-07 01:55:20,260|msrest.http_logger|DEBUG| 'Strict-Transport-Security': 'max-age=15724800; includeSubDomains; preload'\\n2019-12-07 01:55:20,260|msrest.http_logger|DEBUG| 'X-Content-Type-Options': 'nosniff'\\n2019-12-07 01:55:20,260|msrest.http_logger|DEBUG| 'Content-Encoding': 'gzip'\\n2019-12-07 01:55:20,260|msrest.http_logger|DEBUG|Response content:\\n2019-12-07 
01:55:20,260|msrest.http_logger|DEBUG|{\\n \\\"runNumber\\\": 2107,\\n \\\"rootRunId\\\": \\\"020_AzureMLEstimator_1575683693_ddd16e31\\\",\\n \\\"experimentId\\\": \\\"8d96276b-f420-4a67-86be-f933dd3d38cd\\\",\\n \\\"createdUtc\\\": \\\"2019-12-07T01:54:55.3303306+00:00\\\",\\n \\\"createdBy\\\": {\\n \\\"userObjectId\\\": \\\"b77869a0-66f2-4288-89ef-13c10accc4dc\\\",\\n \\\"userPuId\\\": \\\"1003000090A95868\\\",\\n \\\"userIdp\\\": null,\\n \\\"userAltSecId\\\": null,\\n \\\"userIss\\\": \\\"https://sts.windows.net/72f988bf-86f1-41af-91ab-2d7cd011db47/\\\",\\n \\\"userTenantId\\\": \\\"72f988bf-86f1-41af-91ab-2d7cd011db47\\\",\\n \\\"userName\\\": \\\"George Iordanescu\\\"\\n },\\n \\\"userId\\\": \\\"b77869a0-66f2-4288-89ef-13c10accc4dc\\\",\\n \\\"token\\\": null,\\n \\\"tokenExpiryTimeUtc\\\": null,\\n \\\"error\\\": null,\\n \\\"warnings\\\": null,\\n \\\"revision\\\": 7,\\n \\\"runId\\\": \\\"020_AzureMLEstimator_1575683693_ddd16e31\\\",\\n \\\"parentRunId\\\": null,\\n \\\"status\\\": \\\"Running\\\",\\n \\\"startTimeUtc\\\": \\\"2019-12-07T01:55:07.6378716+00:00\\\",\\n \\\"endTimeUtc\\\": null,\\n \\\"heartbeatEnabled\\\": false,\\n \\\"options\\\": {\\n \\\"generateDataContainerIdIfNotSpecified\\\": true\\n },\\n \\\"name\\\": null,\\n \\\"dataContainerId\\\": \\\"dcid.020_AzureMLEstimator_1575683693_ddd16e31\\\",\\n \\\"description\\\": null,\\n \\\"hidden\\\": false,\\n \\\"runType\\\": \\\"azureml.scriptrun\\\",\\n \\\"properties\\\": {\\n \\\"_azureml.ComputeTargetType\\\": \\\"amlcompute\\\",\\n \\\"ContentSnapshotId\\\": \\\"a5071b2a-37a7-40da-8340-69cc894091cb\\\",\\n \\\"azureml.git.repository_uri\\\": \\\"git@github.com:georgeAccnt-GH/DeepSeismic.git\\\",\\n \\\"mlflow.source.git.repoURL\\\": \\\"git@github.com:georgeAccnt-GH/DeepSeismic.git\\\",\\n \\\"azureml.git.branch\\\": \\\"staging\\\",\\n \\\"mlflow.source.git.branch\\\": \\\"staging\\\",\\n \\\"azureml.git.commit\\\": \\\"1d3cd3340f4063508b6f707d5fc2a35f5429a07f\\\",\\n \\\"mlflow.source.git.commit\\\": \\\"1d3cd3340f4063508b6f707d5fc2a35f5429a07f\\\",\\n \\\"azureml.git.dirty\\\": \\\"True\\\",\\n \\\"ProcessInfoFile\\\": \\\"azureml-logs/process_info.json\\\",\\n \\\"ProcessStatusFile\\\": \\\"azureml-logs/process_status.json\\\"\\n },\\n \\\"scriptName\\\": \\\"azureml_01_modelling.py\\\",\\n \\\"target\\\": \\\"gpuclstfwi08\\\",\\n \\\"tags\\\": {},\\n \\\"inputDatasets\\\": [],\\n \\\"runDefinition\\\": null,\\n \\\"createdFrom\\\": {\\n \\\"type\\\": \\\"Notebook\\\",\\n \\\"locationType\\\": \\\"ArtifactId\\\",\\n \\\"location\\\": \\\"LocalUpload/020_AzureMLEstimator_1575683693_ddd16e31/030_ScaleJobsUsingAzuremL_GeophysicsTutorial_FWI_Azure_devito.ipynb\\\"\\n },\\n \\\"cancelUri\\\": \\\"https://eastus2.experiments.azureml.net/execution/v1.0/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourceGroups/ghiordanfwirsg01/providers/Microsoft.MachineLearningServices/workspaces/ghiordanfwiws/experiments/020_AzureMLEstimator/runId/020_AzureMLEstimator_1575683693_ddd16e31/cancel\\\",\\n \\\"completeUri\\\": null,\\n \\\"diagnosticsUri\\\": \\\"https://eastus2.experiments.azureml.net/execution/v1.0/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourceGroups/ghiordanfwirsg01/providers/Microsoft.MachineLearningServices/workspaces/ghiordanfwiws/experiments/020_AzureMLEstimator/runId/020_AzureMLEstimator_1575683693_ddd16e31/diagnostics\\\",\\n \\\"computeRequest\\\": {\\n \\\"nodeCount\\\": 1\\n },\\n \\\"retainForLifetimeOfWorkspace\\\": false\\n}\\n2019-12-07 
01:55:20,262|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.RunClient.get-async:False|DEBUG|[STOP]\\n2019-12-07 01:55:20,262|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31|DEBUG|Constructing run from dto. type: azureml.scriptrun, source: None, props: {'_azureml.ComputeTargetType': 'amlcompute', 'ContentSnapshotId': 'a5071b2a-37a7-40da-8340-69cc894091cb', 'azureml.git.repository_uri': 'git@github.com:georgeAccnt-GH/DeepSeismic.git', 'mlflow.source.git.repoURL': 'git@github.com:georgeAccnt-GH/DeepSeismic.git', 'azureml.git.branch': 'staging', 'mlflow.source.git.branch': 'staging', 'azureml.git.commit': '1d3cd3340f4063508b6f707d5fc2a35f5429a07f', 'mlflow.source.git.commit': '1d3cd3340f4063508b6f707d5fc2a35f5429a07f', 'azureml.git.dirty': 'True', 'ProcessInfoFile': 'azureml-logs/process_info.json', 'ProcessStatusFile': 'azureml-logs/process_status.json'}\\n2019-12-07 01:55:20,262|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunContextManager|DEBUG|Valid logs dir, setting up content loader\\n2019-12-07 01:55:48,084|azureml.core.authentication|DEBUG|Time to expire 1814346.915499 seconds\\n2019-12-07 01:56:18,084|azureml.core.authentication|DEBUG|Time to expire 1814316.915133 seconds\\n2019-12-07 01:56:25,858|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient|DEBUG|Overrides: Max batch size: 50, batch cushion: 5, Interval: 1.\\n2019-12-07 01:56:25,858|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.PostMetricsBatchDaemon|DEBUG|Starting daemon and triggering first instance\\n2019-12-07 01:56:25,859|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient|DEBUG|Used for use_batch=True.\\n2019-12-07 01:56:25,924|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|Calling pyfs\\n2019-12-07 01:56:25,924|azureml.history._tracking.PythonWorkingDirectory|INFO|Current working dir: /devito\\n2019-12-07 01:56:25,924|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|pyfs has path /devito\\n2019-12-07 01:56:25,925|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|Reverting working dir from /devito to /mnt/batch/tasks/shared/LS_root/jobs/ghiordanfwiws/azureml/020_azuremlestimator_1575683693_ddd16e31/mounts/workspaceblobstore/azureml/020_AzureMLEstimator_1575683693_ddd16e31\\n2019-12-07 01:56:25,925|azureml.history._tracking.PythonWorkingDirectory|INFO|Setting working dir to /mnt/batch/tasks/shared/LS_root/jobs/ghiordanfwiws/azureml/020_azuremlestimator_1575683693_ddd16e31/mounts/workspaceblobstore/azureml/020_AzureMLEstimator_1575683693_ddd16e31\\n2019-12-07 01:56:25,925|azureml.history._tracking.PythonWorkingDirectory.workingdir|DEBUG|[STOP]\\n2019-12-07 01:56:25,925|azureml.WorkingDirectoryCM|DEBUG|[STOP]\\n2019-12-07 01:56:25,925|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31|INFO|complete is not setting status for submitted runs.\\n2019-12-07 01:56:25,925|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.FlushingMetricsClient|DEBUG|[START]\\n2019-12-07 01:56:25,925|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient|DEBUG|Overrides: Max batch size: 50, batch cushion: 5, Interval: 1.\\n2019-12-07 01:56:25,925|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.PostMetricsBatchDaemon|DEBUG|Starting 
daemon and triggering first instance\\n2019-12-07 01:56:25,925|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient|DEBUG|Used for use_batch=True.\\n2019-12-07 01:56:25,925|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|[START]\\n2019-12-07 01:56:25,925|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|flush timeout 300 is different from task queue timeout 120, using flush timeout\\n2019-12-07 01:56:25,926|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|Waiting 300 seconds on tasks: [].\\n2019-12-07 01:56:25,926|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch|DEBUG|\\n2019-12-07 01:56:25,926|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|[STOP]\\n2019-12-07 01:56:25,926|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.FlushingMetricsClient|DEBUG|[STOP]\\n2019-12-07 01:56:25,926|azureml.RunStatusContext|DEBUG|[STOP]\\n2019-12-07 01:56:25,926|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.FlushingMetricsClient|DEBUG|[START]\\n2019-12-07 01:56:25,926|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|[START]\\n2019-12-07 01:56:25,926|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|flush timeout 300.0 is different from task queue timeout 120, using flush timeout\\n2019-12-07 01:56:25,926|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|Waiting 300.0 seconds on tasks: [].\\n2019-12-07 01:56:25,926|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch|DEBUG|\\n2019-12-07 01:56:25,926|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|[STOP]\\n2019-12-07 01:56:25,926|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.FlushingMetricsClient|DEBUG|[STOP]\\n2019-12-07 01:56:25,926|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.FlushingMetricsClient|DEBUG|[START]\\n2019-12-07 01:56:25,927|azureml.BatchTaskQueueAdd_1_Batches|DEBUG|[Start]\\n2019-12-07 01:56:25,927|azureml.BatchTaskQueueAdd_1_Batches.WorkerPool|DEBUG|submitting future: _handle_batch\\n2019-12-07 01:56:25,927|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch|DEBUG|Batch size 1.\\n2019-12-07 01:56:25,927|azureml.BatchTaskQueueAdd_1_Batches.0__handle_batch|DEBUG|Using basic handler - no exception handling\\n2019-12-07 01:56:25,927|azureml._restclient.clientbase.WorkerPool|DEBUG|submitting future: _log_batch\\n2019-12-07 01:56:25,927|azureml.BatchTaskQueueAdd_1_Batches|DEBUG|Adding task 0__handle_batch to queue of approximate size: 0\\n2019-12-07 
01:56:25,928|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.post_batch-async:False|DEBUG|[START]\\n2019-12-07 01:56:25,928|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.0__log_batch|DEBUG|Using basic handler - no exception handling\\n2019-12-07 01:56:25,928|azureml.BatchTaskQueueAdd_1_Batches|DEBUG|[Stop] - waiting default timeout\\n2019-12-07 01:56:25,929|msrest.service_client|DEBUG|Accept header absent and forced to application/json\\n2019-12-07 01:56:25,929|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch|DEBUG|Adding task 0__log_batch to queue of approximate size: 0\\n2019-12-07 01:56:25,929|azureml.BatchTaskQueueAdd_1_Batches.WaitFlushSource:BatchTaskQueueAdd_1_Batches|DEBUG|[START]\\n2019-12-07 01:56:25,929|msrest.universal_http.requests|DEBUG|Configuring retry: max_retries=3, backoff_factor=0.8, max_backoff=90\\n2019-12-07 01:56:25,930|azureml.BatchTaskQueueAdd_1_Batches.WaitFlushSource:BatchTaskQueueAdd_1_Batches|DEBUG|Overriding default flush timeout from None to 120\\n2019-12-07 01:56:25,930|msrest.http_logger|DEBUG|Request URL: 'https://eastus2.experiments.azureml.net/history/v1.0/subscriptions/789908e0-5fc2-4c4d-b5f5-9764b0d602b3/resourceGroups/ghiordanfwirsg01/providers/Microsoft.MachineLearningServices/workspaces/ghiordanfwiws/experiments/020_AzureMLEstimator/runs/020_AzureMLEstimator_1575683693_ddd16e31/batch/metrics'\\n2019-12-07 01:56:25,930|azureml.BatchTaskQueueAdd_1_Batches.WaitFlushSource:BatchTaskQueueAdd_1_Batches|DEBUG|Waiting 120 seconds on tasks: [AsyncTask(0__handle_batch)].\\n2019-12-07 01:56:25,930|msrest.http_logger|DEBUG|Request method: 'POST'\\n2019-12-07 01:56:25,930|azureml.BatchTaskQueueAdd_1_Batches.0__handle_batch.WaitingTask|DEBUG|[START]\\n2019-12-07 01:56:25,930|msrest.http_logger|DEBUG|Request headers:\\n2019-12-07 01:56:25,930|azureml.BatchTaskQueueAdd_1_Batches.0__handle_batch.WaitingTask|DEBUG|Awaiter is BatchTaskQueueAdd_1_Batches\\n2019-12-07 01:56:25,931|msrest.http_logger|DEBUG| 'Accept': 'application/json'\\n2019-12-07 01:56:25,931|azureml.BatchTaskQueueAdd_1_Batches.0__handle_batch.WaitingTask|DEBUG|[STOP]\\n2019-12-07 01:56:25,931|msrest.http_logger|DEBUG| 'Content-Type': 'application/json-patch+json; charset=utf-8'\\n2019-12-07 01:56:25,931|azureml.BatchTaskQueueAdd_1_Batches|DEBUG|\\n2019-12-07 01:56:25,931|msrest.http_logger|DEBUG| 'x-ms-client-request-id': '18a01463-68a6-4c03-bc10-c9e912702ee6'\\n2019-12-07 01:56:25,931|azureml.BatchTaskQueueAdd_1_Batches.WaitFlushSource:BatchTaskQueueAdd_1_Batches|DEBUG|[STOP]\\n2019-12-07 01:56:25,931|msrest.http_logger|DEBUG| 'request-id': '18a01463-68a6-4c03-bc10-c9e912702ee6'\\n2019-12-07 01:56:25,931|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|[START]\\n2019-12-07 01:56:25,931|msrest.http_logger|DEBUG| 'Content-Length': '410'\\n2019-12-07 01:56:25,932|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|flush timeout 300.0 is different from task queue timeout 120, using flush timeout\\n2019-12-07 01:56:25,932|msrest.http_logger|DEBUG| 'User-Agent': 'python/3.6.9 (Linux-4.15.0-1057-azure-x86_64-with-debian-10.0) msrest/0.6.10 azureml._restclient/core.1.0.76 sdk_run'\\n2019-12-07 
01:56:25,932|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|Waiting 300.0 seconds on tasks: [AsyncTask(0__log_batch)].\\n2019-12-07 01:56:25,932|msrest.http_logger|DEBUG|Request body:\\n2019-12-07 01:56:25,932|msrest.http_logger|DEBUG|{\\\"values\\\": [{\\\"metricId\\\": \\\"1a8ad3d8-accf-42da-a07d-fd00ef5ee1e6\\\", \\\"metricType\\\": \\\"azureml.v1.scalar\\\", \\\"createdUtc\\\": \\\"2019-12-07T01:56:25.858188Z\\\", \\\"name\\\": \\\"training_message01: \\\", \\\"description\\\": \\\"\\\", \\\"numCells\\\": 1, \\\"cells\\\": [{\\\"training_message01: \\\": \\\"finished experiment\\\"}], \\\"schema\\\": {\\\"numProperties\\\": 1, \\\"properties\\\": [{\\\"propertyId\\\": \\\"training_message01: \\\", \\\"name\\\": \\\"training_message01: \\\", \\\"type\\\": \\\"string\\\"}]}}]}\\n2019-12-07 01:56:25,932|msrest.universal_http|DEBUG|Configuring redirects: allow=True, max=30\\n2019-12-07 01:56:25,932|msrest.universal_http|DEBUG|Configuring request: timeout=100, verify=True, cert=None\\n2019-12-07 01:56:25,932|msrest.universal_http|DEBUG|Configuring proxies: ''\\n2019-12-07 01:56:25,932|msrest.universal_http|DEBUG|Evaluate proxies against ENV settings: True\\n2019-12-07 01:56:26,050|msrest.http_logger|DEBUG|Response status: 200\\n2019-12-07 01:56:26,051|msrest.http_logger|DEBUG|Response headers:\\n2019-12-07 01:56:26,051|msrest.http_logger|DEBUG| 'Date': 'Sat, 07 Dec 2019 01:56:26 GMT'\\n2019-12-07 01:56:26,051|msrest.http_logger|DEBUG| 'Content-Length': '0'\\n2019-12-07 01:56:26,051|msrest.http_logger|DEBUG| 'Connection': 'keep-alive'\\n2019-12-07 01:56:26,051|msrest.http_logger|DEBUG| 'Request-Context': 'appId=cid-v1:2d2e8e63-272e-4b3c-8598-4ee570a0e70d'\\n2019-12-07 01:56:26,051|msrest.http_logger|DEBUG| 'x-ms-client-request-id': '18a01463-68a6-4c03-bc10-c9e912702ee6'\\n2019-12-07 01:56:26,051|msrest.http_logger|DEBUG| 'x-ms-client-session-id': ''\\n2019-12-07 01:56:26,051|msrest.http_logger|DEBUG| 'Strict-Transport-Security': 'max-age=15724800; includeSubDomains; preload'\\n2019-12-07 01:56:26,051|msrest.http_logger|DEBUG| 'X-Content-Type-Options': 'nosniff'\\n2019-12-07 01:56:26,051|msrest.http_logger|DEBUG|Response content:\\n2019-12-07 01:56:26,051|msrest.http_logger|DEBUG|\\n2019-12-07 01:56:26,052|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.post_batch-async:False|DEBUG|[STOP]\\n2019-12-07 01:56:26,182|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.0__log_batch.WaitingTask|DEBUG|[START]\\n2019-12-07 01:56:26,182|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.0__log_batch.WaitingTask|DEBUG|Awaiter is PostMetricsBatch\\n2019-12-07 01:56:26,183|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.0__log_batch.WaitingTask|DEBUG|[STOP]\\n2019-12-07 01:56:26,183|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch|DEBUG|Waiting on task: 0__log_batch.\\n1 tasks left. 
Current duration of flush 0.0002186298370361328 seconds.\\n\\n2019-12-07 01:56:26,183|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.PostMetricsBatch.WaitFlushSource:MetricsClient|DEBUG|[STOP]\\n2019-12-07 01:56:26,183|azureml._SubmittedRun#020_AzureMLEstimator_1575683693_ddd16e31.RunHistoryFacade.MetricsClient.FlushingMetricsClient|DEBUG|[STOP]\\n2019-12-07 01:56:26,183|azureml.SendRunKillSignal|DEBUG|[STOP]\\n2019-12-07 01:56:26,183|azureml.HistoryTrackingWorkerPool.WorkerPoolShutdown|DEBUG|[START]\\n2019-12-07 01:56:26,183|azureml.HistoryTrackingWorkerPool.WorkerPoolShutdown|DEBUG|[STOP]\\n2019-12-07 01:56:26,183|azureml.WorkerPool|DEBUG|[STOP]\\n\\nRun is completed.\", \"graph\": {}, \"widget_settings\": {\"childWidgetDisplay\": \"popup\", \"send_telemetry\": false, \"log_level\": \"NOTSET\", \"sdk_version\": \"1.0.76\"}, \"loading\": false}" + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Counter499: submission of job 499 on 400 nodes took 9.16640019416809 seconds \n", + "run list length 499\n" + ] + } + ], + "source": [ + "import time\n", + "from IPython.display import clear_output\n", + "\n", + "no_of_jobs = 500\n", + "no_of_nodes = 400\n", + "\n", + "job_counter = 0\n", + "print_cycle = 20\n", + "run_list = []\n", + "submit_time_list = []\n", + "for crt_nodes in range(no_of_nodes, (no_of_nodes+1)):\n", + " gpu_cluster.update(min_nodes=0, max_nodes=crt_nodes, idle_seconds_before_scaledown=1200)\n", + " clust_start_time = time.time()\n", + " for crt_job in range(1, no_of_jobs):\n", + " job_counter+= 1\n", + " start_time = time.time()\n", + " run = exp.submit(est)\n", + " end_time = time.time()\n", + " run_time = end_time - start_time\n", + " run_list.append(run)\n", + " submit_time_list.append(run_time)\n", + " print('Counter{}: submission of job {} on {} nodes took {} seconds '.format(job_counter, crt_job, crt_nodes, run_time))\n", + " print('run list length {}'.format(len(run_list)))\n", + " if ((job_counter-1) % print_cycle) == 0:\n", + " clear_output()\n", + " print('Showing details for run {}'.format(job_counter))\n", + " RunDetails(run).show()\n", + "# [all_jobs_done = True if (('Completed'==crt_queried_job.get_status()) for crt_queried_job in run_list)]" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([10.16889381, 10.52522182, 8.67223501, 7.76976609, 8.98659873,\n", + " 9.54043746, 7.56379271, 7.95067477, 10.98772812, 8.58469343,\n", + " 9.19690919, 8.37747335, 8.49322033, 8.96249437, 11.00566387,\n", + " 10.18721223, 8.70340395, 9.07873917, 8.83641577, 9.93886757,\n", + " 8.43751788, 8.88584614, 8.46158338, 8.10118651, 7.95576859,\n", + " 8.02682757, 8.59585524, 11.43893504, 8.21132302, 7.56929898,\n", + " 9.16166759, 7.96446443, 8.20211887, 8.0066514 , 8.16604567,\n", + " 9.03855515, 9.27646971, 7.88356876, 8.6105082 , 8.63279152,\n", + " 9.63798594, 7.88380122, 11.83064437, 7.67609763, 8.36450744,\n", + " 10.36203027, 8.20605659, 8.27934074, 8.71854138, 7.48072934,\n", + " 7.98534775, 7.88993239, 9.49783468, 8.20365477, 8.31964707,\n", + " 8.24653029, 9.14784336, 8.39632297, 8.88221884, 10.17075896,\n", + " 7.93166018, 8.50952411, 8.35107565, 8.62145162, 9.1473949 ,\n", + " 10.16314006, 9.48931861, 9.52163553, 10.48561263, 8.70149064,\n", + " 8.83968425, 8.77899456, 8.19752908, 8.23720503, 8.44300842,\n", + " 10.4865036 , 9.38597918, 8.16601682, 10.31557417, 
9.39266205,\n", + " 9.3517375 , 8.26235414, 9.90602231, 8.08361053, 9.55309701,\n", + " 8.37694287, 8.2842195 , 9.27187061, 8.05741239, 9.81221128,\n", + " 8.67282987, 7.50111246, 8.84159875, 7.5928266 , 8.2180264 ,\n", + " 11.30247498, 8.97954369, 9.08557224, 8.62394547, 27.931288 ,\n", + " 11.31702137, 9.03355598, 9.82408452, 10.98696327, 8.15972924,\n", + " 8.10580516, 8.6766634 , 9.18826079, 9.91399217, 9.63535714,\n", + " 8.84899211, 8.59690166, 9.08935356, 7.87525439, 9.04824638,\n", + " 10.58436322, 8.05351543, 8.0442934 , 8.51687765, 8.23182964,\n", + " 7.90365982, 9.41734576, 7.82690763, 7.86053801, 8.81060672,\n", + " 15.63083076, 9.12365007, 8.4692018 , 8.38626456, 9.1455934 ,\n", + " 7.9579742 , 8.32254815, 9.60984373, 7.72059083, 9.80256414,\n", + " 8.03569841, 8.56897283, 9.88993764, 9.825032 , 9.10494757,\n", + " 7.96795917, 8.83923078, 8.12920213, 9.14702606, 10.44252062,\n", + " 8.11435223, 11.10698366, 8.54753256, 11.07914209, 8.0072608 ,\n", + " 8.64252162, 7.86998582, 8.16502595, 9.72599697, 8.01553535,\n", + " 8.05236411, 9.4306016 , 8.3510747 , 8.15123487, 7.73660946,\n", + " 8.78807712, 8.42650437, 9.09502602, 67.75333071, 14.179214 ,\n", + " 13.08692336, 14.52568007, 12.39239168, 8.40634942, 8.3893857 ,\n", + " 7.80925822, 8.04524732, 10.61561441, 9.33992386, 8.05361605,\n", + " 8.71911073, 8.13864756, 8.18779135, 8.03402972, 8.20232296,\n", + " 10.52845287, 8.21701574, 9.63750052, 8.16265893, 7.95386362,\n", + " 7.85334754, 7.96290469, 8.1984942 , 8.32950211, 17.0101552 ,\n", + " 14.20266891, 13.09765553, 14.32137418, 8.90045214, 9.79849219,\n", + " 7.7378149 , 8.17814636, 8.0692122 , 8.02391315, 7.73337412,\n", + " 8.24749708, 8.21430159, 8.42469835, 7.93915629, 8.17162681,\n", + " 9.29439068, 8.39062524, 8.05844831, 12.62865376, 8.03868556,\n", + " 8.03020358, 8.72658324, 7.98921943, 10.13008642, 8.36204886,\n", + " 9.8618927 , 8.84138846, 8.26497674, 8.53586483, 11.22441888,\n", + " 8.60046291, 9.52709126, 8.1862669 , 8.47402501, 8.08845234,\n", + " 8.0216496 , 8.25297642, 9.52822161, 8.53732967, 9.20458651,\n", + " 7.84344959, 8.76693869, 9.55830622, 9.32047439, 9.61785316,\n", + " 14.20765901, 13.20616293, 12.79950929, 13.23175693, 10.48755121,\n", + " 7.89634991, 8.62207508, 10.17518067, 9.5078795 , 8.16943836,\n", + " 11.88958383, 8.53581595, 8.78866196, 9.86849713, 8.38485384,\n", + " 7.80456519, 8.7930553 , 8.67091751, 11.64525867, 10.70969439,\n", + " 9.57600379, 7.88863015, 9.16765165, 8.10214615, 8.1002388 ,\n", + " 7.79884577, 7.84607792, 10.70999765, 8.32228923, 8.15903163,\n", + " 8.16516185, 11.13710332, 8.67460465, 8.04933095, 7.92010641,\n", + " 9.71926355, 7.96389985, 8.50223684, 7.80719972, 7.94503832,\n", + " 9.14503789, 8.74866915, 8.32825327, 9.38176489, 8.7043674 ,\n", + " 8.11469626, 8.39300489, 8.52375507, 9.48120856, 9.30481339,\n", + " 11.00180173, 8.00356221, 9.36562443, 11.26503015, 8.29429078,\n", + " 10.5787971 , 8.23888326, 8.25085521, 9.65488529, 10.22367787,\n", + " 8.86958766, 8.67924905, 9.8065629 , 9.98437238, 10.44085979,\n", + " 8.48997521, 13.41537356, 8.53429914, 9.41697288, 8.75000739,\n", + " 8.67022324, 10.65776849, 8.78767824, 29.17240787, 8.29843664,\n", + " 10.48030996, 8.60965252, 9.05648637, 11.23915553, 7.71198177,\n", + " 8.58811665, 11.27894258, 11.26059055, 8.08691239, 9.09145069,\n", + " 8.37398744, 9.33932018, 9.50723815, 14.62887979, 8.08766961,\n", + " 8.1010766 , 8.15962887, 7.86279893, 7.81253982, 8.72090292,\n", + " 28.51810336, 8.20156765, 8.10436082, 9.35736108, 10.11271501,\n", + " 8.28001332, 
8.10338402, 7.82260585, 7.74735689, 9.37371802,\n", + " 7.83298874, 8.09861684, 11.44845009, 13.80942464, 13.86787438,\n", + " 12.95256805, 13.5946703 , 9.04438519, 8.42931032, 7.69650388,\n", + " 8.3203001 , 8.93009233, 8.99896145, 10.261621 , 9.76696181,\n", + " 8.42695355, 9.45543766, 8.35829163, 8.19327784, 8.54582119,\n", + " 10.28408813, 9.96855664, 9.4126513 , 8.85548735, 8.37564468,\n", + " 7.85812593, 11.26866746, 11.99777699, 8.90290856, 9.73011518,\n", + " 11.37953544, 9.56070495, 13.08286595, 7.91717887, 8.70709944,\n", + " 8.89286566, 9.43534017, 9.63375568, 9.45693254, 9.41722798,\n", + " 8.95478702, 10.59636545, 9.07217526, 8.91465688, 8.43598938,\n", + " 10.09872103, 8.53826594, 10.51633263, 8.16474724, 9.60920191,\n", + " 8.79985189, 11.08250904, 15.82575488, 13.72388315, 13.76962495,\n", + " 15.5107224 , 12.99527621, 9.55358648, 11.27318692, 10.64224267,\n", + " 9.28194666, 8.15835619, 10.34727526, 9.13943338, 8.47959018,\n", + " 12.95671797, 8.67874169, 9.48093748, 11.13487458, 11.16393185,\n", + " 9.45039058, 9.26687908, 10.83345985, 10.013412 , 12.88114643,\n", + " 8.90868664, 9.11424375, 10.62471223, 10.37447572, 8.56728458,\n", + " 11.44042325, 8.61506176, 14.37763166, 9.26899981, 9.01356244,\n", + " 12.6770153 , 7.95549965, 8.69824529, 8.16541219, 10.80149889,\n", + " 9.85532331, 9.16404986, 11.05029202, 8.95759201, 9.60003638,\n", + " 8.64066339, 11.99474025, 10.88645577, 9.82658648, 8.38357234,\n", + " 8.1931479 , 8.36809587, 8.34779596, 9.29737759, 7.71148348,\n", + " 8.34155583, 8.46944427, 9.46755242, 8.39070392, 9.67334032,\n", + " 9.42819619, 8.90718842, 8.95999622, 17.03638124, 14.13874507,\n", + " 14.17324162, 14.82433629, 10.27358413, 7.75390744, 10.63386297,\n", + " 10.74013877, 9.25264263, 8.88592076, 15.62230277, 8.68499494,\n", + " 7.90613437, 10.8253715 , 9.28829837, 9.96133757, 8.82941794,\n", + " 11.07499003, 9.08565426, 8.76584291, 11.91541052, 9.45269704,\n", + " 9.68554997, 9.76184082, 10.95884109, 9.22084093, 9.07609534,\n", + " 9.72482204, 8.66262245, 8.85580897, 12.12771249, 9.1096139 ,\n", + " 9.55135322, 9.73613167, 12.00068331, 9.63835907, 8.8003633 ,\n", + " 10.78142428, 10.36234426, 8.7075491 , 8.79299307, 10.6836946 ,\n", + " 8.24508142, 9.70224071, 8.64105797, 9.16640019])" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "(array([ 0, 0, 0, 16, 105, 85, 75, 61, 40]),\n", + " array([ 6. , 6.44444444, 6.88888889, 7.33333333, 7.77777778,\n", + " 8.22222222, 8.66666667, 9.11111111, 9.55555556, 10. 
]))" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import numpy as np\n", + "np.asarray(submit_time_list)\n", + "np.histogram(np.asarray(submit_time_list), bins=np.linspace(6.0, 10.0, num=10), density=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Final print 24, time 107.859 seconds: Counter({'Completed': 478, 'Failed': 21})izing': 1})Running': 1})\r" + ] + } + ], + "source": [ + "def wait_for_run_list_to_finish(the_run_list, plot_results=True):\n", + " finished_status_list = ['Completed', 'Failed']\n", + " printing_counter = 0\n", + " start_time = time.time()\n", + " while (not all((crt_queried_job.get_status() in finished_status_list) for crt_queried_job in the_run_list)):\n", + " time.sleep(2)\n", + " printing_counter+= 1\n", + " crt_status = Counter([crt_queried_job.get_status() for crt_queried_job in the_run_list])\n", + " print('print {0:.0f}, time {1:.3f} seconds: {2}'.format(printing_counter, time.time() - start_time, \n", + " str(crt_status)), end=\"\\r\")\n", + " if plot_results:\n", + "# import numpy as np\n", + " import matplotlib.pyplot as plt\n", + " plt.bar(crt_status.keys(), crt_status.values())\n", + " plt.show()\n", + " \n", + "# indexes = np.arange(len(labels))\n", + "# width = 1\n", + "\n", + "# plt.bar(indexes, values, width)\n", + "# plt.xticks(indexes + width * 0.5, labels)\n", + "# plt.show()\n", + "\n", + "# from pandas import Series\n", + "# crt_status = Series([crt_queried_job.get_status() for crt_queried_job in the_run_list])\n", + "# status_counts = crt_status.value_counts().sort_index()\n", + "# print('print {0:.0f}, time {1:.3f} seconds: {2}'.format(printing_counter, time.time() - start_time, \n", + "# str(status_counts)), end=\"\\r\")\n", + "# final status\n", + " print('Final print {0:.0f}, time {1:.3f} seconds: {2}'.format(printing_counter, time.time() - start_time, \n", + " str(Counter([crt_queried_job.get_status() for crt_queried_job in the_run_list]))), end=\"\\r\") \n", + "\n", + "wait_for_run_list_to_finish(run_list, plot_results=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "run_durations = [get_run_duration(crt_queried_job) for crt_queried_job in run_list]\n", + "run_statuses = [crt_queried_job.get_status() for crt_queried_job in run_list]" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([28, 33, 15, 45, 18, 43, 30, 31, 65, 6, 42, 16, 11, 41, 19, 8, 5,\n", + " 2, 64, 34])" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + + "[244.173832 244.510378 245.027595 245.540781 247.395535 247.411761\n", + " 247.933416 248.256958 248.468753 249.724234 249.874347 250.013758\n", + " 250.53221 251.10704 251.400594 253.192625 253.421425 253.968411\n", + " 256.888013 260.331917]\n", + "['Completed' 'Completed' 'Completed' 'Completed' 'Completed' 'Completed'\n", + " 'Completed' 'Failed' 'Completed' 'Completed' 'Completed' 'Completed'\n", + " 'Failed' 'Completed' 'Completed' 'Completed' 'Completed' 'Completed'\n", + " 'Failed' 'Completed']\n" + ] + }, + { + "data": { + "text/plain": [ + "array([232, 54, 195, 214, 250, 48, 490, 261, 329, 140, 336, 129, 311,\n", + " 223, 226, 370, 319, 254, 197, 85])" + ] + }, + 
"execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[92.52469 92.854187 93.127771 93.19945 93.319895 93.372538 93.557287\n", + " 93.579393 93.646901 93.681486 93.890417 94.05724 94.162242 94.165297\n", + " 94.182998 94.263456 94.316783 94.400242 94.406081 94.583321]\n", + "['Completed' 'Completed' 'Completed' 'Completed' 'Failed' 'Completed'\n", + " 'Failed' 'Failed' 'Completed' 'Completed' 'Completed' 'Completed'\n", + " 'Completed' 'Completed' 'Completed' 'Failed' 'Completed' 'Completed'\n", + " 'Failed' 'Completed']\n" + ] + }, + { + "data": { + "text/plain": [ + "(array([ 0, 0, 128, 320, 8, 1, 3, 3, 0]),\n", + " array([ 50. , 66.66666667, 83.33333333, 100. ,\n", + " 116.66666667, 133.33333333, 150. , 166.66666667,\n", + " 183.33333333, 200. ]))" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "run_durations = np.asarray(run_durations)\n", + "run_statuses = np.asarray(run_statuses)\n", + "\n", + "extreme_k = 20\n", + "#longest runs\n", + "indices = np.argsort(run_durations)[-extreme_k:]\n", + "indices\n", + "print(run_durations[indices])\n", + "print(run_statuses[indices])\n", + "#shortest runs\n", + "indices = np.argsort(run_durations)[0:extreme_k]\n", + "indices\n", + "print(run_durations[indices])\n", + "print(run_statuses[indices])\n", + "\n", + "#run_durations histogram - counts and bins\n", + "np.histogram(run_durations, bins=np.linspace(50, 200, num=10), density=False)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Finished running 030_ScaleJobsUsingAzuremL_GeophysicsTutorial_FWI_Azure_devito!\n" + ] + } + ], + "source": [ + "print('Finished running 030_ScaleJobsUsingAzuremL_GeophysicsTutorial_FWI_Azure_devito!')" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "fwi_dev_conda_environment Python", + "language": "python", + "name": "fwi_dev_conda_environment" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/contrib/scripts/README.md b/contrib/scripts/README.md new file mode 100644 index 00000000..836a05dc --- /dev/null +++ b/contrib/scripts/README.md @@ -0,0 +1,6 @@ +This folder contains a variety of scripts which might be useful. + +# Ablation Study + +Contained in `ablation.sh`, the script demonstrates running the HRNet model with various patch sizes. 
+ diff --git a/contrib/scripts/ablation.sh b/contrib/scripts/ablation.sh new file mode 100755 index 00000000..81fcdaa6 --- /dev/null +++ b/contrib/scripts/ablation.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +source activate seismic-interpretation + +# Patch_Size 100: Patch vs Section Depth +python scripts/prepare_dutchf3.py split_train_val patch --data-dir=/mnt/dutch --stride=50 --patch=100 +python train.py OUTPUT_DIR /data/output/hrnet_patch TRAIN.DEPTH patch TRAIN.PATCH_SIZE 100 --cfg 'configs/hrnet.yaml' +python train.py OUTPUT_DIR /data/output/hrnet_section TRAIN.DEPTH section TRAIN.PATCH_SIZE 100 --cfg 'configs/hrnet.yaml' + +# Patch_Size 150: Patch vs Section Depth +python scripts/prepare_dutchf3.py split_train_val patch --data-dir=/mnt/dutch --stride=50 --patch=150 +python train.py OUTPUT_DIR /data/output/hrnet_patch TRAIN.DEPTH patch TRAIN.PATCH_SIZE 150 --cfg 'configs/hrnet.yaml' +python train.py OUTPUT_DIR /data/output/hrnet_section TRAIN.DEPTH section TRAIN.PATCH_SIZE 150 --cfg 'configs/hrnet.yaml' + +# Patch_Size 200: Patch vs Section Depth +python scripts/prepare_dutchf3.py split_train_val patch --data-dir=/mnt/dutch --stride=50 --patch=200 +python train.py OUTPUT_DIR /data/output/hrnet_patch TRAIN.DEPTH patch TRAIN.PATCH_SIZE 200 --cfg 'configs/hrnet.yaml' +python train.py OUTPUT_DIR /data/output/hrnet_section TRAIN.DEPTH section TRAIN.PATCH_SIZE 200 --cfg 'configs/hrnet.yaml' + +# Patch_Size 250: Patch vs Section Depth +python scripts/prepare_dutchf3.py split_train_val patch --data-dir=/mnt/dutch --stride=50 --patch=250 +python train.py OUTPUT_DIR /data/output/hrnet_patch TRAIN.DEPTH patch TRAIN.PATCH_SIZE 250 TRAIN.AUGMENTATIONS.RESIZE.HEIGHT 250 TRAIN.AUGMENTATIONS.RESIZE.WIDTH 250 --cfg 'configs/hrnet.yaml' +python train.py OUTPUT_DIR /data/output/hrnet_section TRAIN.DEPTH section TRAIN.PATCH_SIZE 250 TRAIN.AUGMENTATIONS.RESIZE.HEIGHT 250 TRAIN.AUGMENTATIONS.RESIZE.WIDTH 250 --cfg 'configs/hrnet.yaml' + diff --git a/contrib/scripts/download_hrnet.sh b/contrib/scripts/download_hrnet.sh new file mode 100755 index 00000000..157f20c3 --- /dev/null +++ b/contrib/scripts/download_hrnet.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# +# Example: +# download_hrnet.sh /data/models hrnet.pth +# + +echo Using "$1" as the download directory + +if [ ! -d "$1" ] +then + echo "Directory does not exist - creating..." + mkdir -p "$1" +fi + +full_path=$1/$2 + +echo "Downloading to ${full_path}" + +wget --header 'Host: optgaw.dm.files.1drv.com' \ + --user-agent 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0' \ + --header 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' \ + --header 'Accept-Language: en-GB,en;q=0.5' \ + --referer 'https://onedrive.live.com/' \ + --header 'Upgrade-Insecure-Requests: 1' 'https://optgaw.dm.files.1drv.com/y4m14W1OEuoniQMCT4m64UV8CSQT-dFe2ZRhU0LAZSal80V4phgVIlTYxI2tUi6BPVOy7l5rK8MKpZNywVvtz-NKL2ZWq-UYRL6MAjbLgdFA6zyW8RRrKBe_FcqcWr4YTXeJ18xfVqco6CdGZHFfORBE6EtFxEIrHWNjM032dWZLdqZ0eXd7RZTrHs1KKYa92zcs0Rj91CAyIK4hIaOomzEWA/hrnetv2_w48_imagenet_pretrained.pth?download&psid=1' \ + --output-document ${full_path} \ No newline at end of file diff --git a/contrib/scripts/get_F3_voxel.sh b/contrib/scripts/get_F3_voxel.sh new file mode 100755 index 00000000..850d73f8 --- /dev/null +++ b/contrib/scripts/get_F3_voxel.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
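+#
+# Fetches the Dutch F3 train/val label images used by the voxel2pixel experiments.
+#
+# Example:
+#   get_F3_voxel.sh                  # default: experiments/interpretation/voxel2pixel/F3/{train,val}
+#   get_F3_voxel.sh /path/to/labels  # place both label images in the given directory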
+ +echo "Make sure you also download Dutch F3 data from https://github.com/bolgebrygg/MalenoV" +# fetch Dutch F3 from Malenov project. +# wget https://drive.google.com/open?id=0B7brcf-eGK8CUUZKLXJURFNYeXM -O interpretation/voxel2pixel/F3/data.segy + +if [ $# -eq 0 ] +then + downdirtrain='experiments/interpretation/voxel2pixel/F3/train' + downdirval='experiments/interpretation/voxel2pixel/F3/val' +else + downdirtrain=$1 + downdirval=$1 +fi + +mkdir -p ${downdirtrain} +mkdir -p ${downdirval} + +echo "Downloading train label to $downdirtrain and validation label to $downdirval" +wget https://github.com/waldeland/CNN-for-ASI/raw/master/F3/train/inline_339.png -O ${downdirtrain}/inline_339.png +wget https://github.com/waldeland/CNN-for-ASI/raw/master/F3/val/inline_405.png -O ${downdirval}/inline_405.png +echo "Download complete" diff --git a/cv_lib/AUTHORS.md b/cv_lib/AUTHORS.md new file mode 100644 index 00000000..173bb039 --- /dev/null +++ b/cv_lib/AUTHORS.md @@ -0,0 +1 @@ +[Mathew Salvaris] [@msalvaris](http://github.com/msalvaris/) diff --git a/cv_lib/README.md b/cv_lib/README.md new file mode 100644 index 00000000..7ff14865 --- /dev/null +++ b/cv_lib/README.md @@ -0,0 +1,11 @@ +# CVLib + +A set of utility functions for computer vision + +## Install + +```bash +pip install -e . +``` + +This will install the package cv_lib \ No newline at end of file diff --git a/cv_lib/cv_lib/__init__.py b/cv_lib/cv_lib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cv_lib/cv_lib/__version__.py b/cv_lib/cv_lib/__version__.py new file mode 100644 index 00000000..97b8b400 --- /dev/null +++ b/cv_lib/cv_lib/__version__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +__version__ = "0.0.1" diff --git a/cv_lib/cv_lib/event_handlers/__init__.py b/cv_lib/cv_lib/event_handlers/__init__.py new file mode 100644 index 00000000..589bbd86 --- /dev/null +++ b/cv_lib/cv_lib/event_handlers/__init__.py @@ -0,0 +1,42 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+
+from ignite.handlers import ModelCheckpoint
+import glob
+import os
+from shutil import copyfile
+
+
+class SnapshotHandler:
+    def __init__(self, dir_name, filename_prefix, score_function, snapshot_function):
+        self._model_save_location = dir_name
+        self._running_model_prefix = filename_prefix + "_running"
+        self._snapshot_prefix = filename_prefix + "_snapshot"
+        self._snapshot_function = snapshot_function
+        self._snapshot_num = 1
+        self._score_function = score_function
+        self._checkpoint_handler = self._create_checkpoint_handler()
+
+    def _create_checkpoint_handler(self):
+        return ModelCheckpoint(
+            self._model_save_location,
+            self._running_model_prefix,
+            score_function=self._score_function,
+            n_saved=1,
+            create_dir=True,
+            save_as_state_dict=True,
+            require_empty=False,
+        )
+
+    def __call__(self, engine, to_save):
+        self._checkpoint_handler(engine, to_save)
+        if self._snapshot_function():
+            files = glob.glob(os.path.join(self._model_save_location, self._running_model_prefix + "*"))
+            # Slice the prefix off rather than calling str.lstrip, which strips a
+            # character set and can eat into the rest of the filename.
+            name_postfix = os.path.basename(files[0])[len(self._running_model_prefix):]
+            copyfile(
+                files[0],
+                os.path.join(self._model_save_location, f"{self._snapshot_prefix}{self._snapshot_num}{name_postfix}",),
+            )
+            self._checkpoint_handler = self._create_checkpoint_handler()  # Reset the checkpoint handler
+            self._snapshot_num += 1
diff --git a/cv_lib/cv_lib/event_handlers/azureml_handlers.py b/cv_lib/cv_lib/event_handlers/azureml_handlers.py
new file mode 100644
index 00000000..e69de29b
diff --git a/cv_lib/cv_lib/event_handlers/logging_handlers.py b/cv_lib/cv_lib/event_handlers/logging_handlers.py
new file mode 100644
index 00000000..b7c41651
--- /dev/null
+++ b/cv_lib/cv_lib/event_handlers/logging_handlers.py
@@ -0,0 +1,90 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+import logging
+import logging.config
+from toolz import curry
+
+import numpy as np
+
+np.set_printoptions(precision=3)
+
+
+@curry
+def log_training_output(engine, log_interval=100):
+    logger = logging.getLogger(__name__)
+
+    if engine.state.iteration % log_interval == 0:
+        logger.info(f"Epoch: {engine.state.epoch} Iter: {engine.state.iteration} loss {engine.state.output['loss']}")
+
+
+@curry
+def log_lr(optimizer, engine):
+    logger = logging.getLogger(__name__)
+    lr = [param_group["lr"] for param_group in optimizer.param_groups]
+    logger.info(f"lr - {lr}")
+
+
+_DEFAULT_METRICS = {"pixacc": "Avg accuracy :", "nll": "Avg loss :"}
+
+
+@curry
+def log_metrics(log_msg, engine, metrics_dict=_DEFAULT_METRICS):
+    logger = logging.getLogger(__name__)
+    metrics = engine.state.metrics
+    metrics_msg = " ".join([f"{metrics_dict[k]} {metrics[k]:.2f}" for k in metrics_dict])
+    logger.info(f"{log_msg} - Epoch {engine.state.epoch} [{engine.state.max_epochs}] " + metrics_msg)
+
+
+@curry
+def log_class_metrics(log_msg, engine, metrics_dict):
+    logger = logging.getLogger(__name__)
+    metrics = engine.state.metrics
+    metrics_msg = "\n".join(f"{metrics_dict[k]} {metrics[k].numpy()}" for k in metrics_dict)
+    logger.info(f"{log_msg} - Epoch {engine.state.epoch} [{engine.state.max_epochs}]\n" + metrics_msg)
+
+
+class Evaluator:
+    def __init__(self, evaluation_engine, data_loader):
+        self._evaluation_engine = evaluation_engine
+        self._data_loader = data_loader
+
+    def __call__(self, engine):
+        self._evaluation_engine.run(self._data_loader)
+
+
+class HorovodLRScheduler:
+    """
+    Horovod: using `lr = base_lr * hvd.size()` from the very beginning leads to worse final
+    accuracy. 
Scale the learning rate `lr = base_lr` ---> `lr = base_lr * hvd.size()` during + the first five epochs. See https://arxiv.org/abs/1706.02677 for details. + After the warmup reduce learning rate by 10 on the 30th, 60th and 80th epochs. + """ + + def __init__( + self, base_lr, warmup_epochs, cluster_size, data_loader, optimizer, batches_per_allreduce, + ): + self._warmup_epochs = warmup_epochs + self._cluster_size = cluster_size + self._data_loader = data_loader + self._optimizer = optimizer + self._base_lr = base_lr + self._batches_per_allreduce = batches_per_allreduce + self._logger = logging.getLogger(__name__) + + def __call__(self, engine): + epoch = engine.state.epoch + if epoch < self._warmup_epochs: + epoch += float(engine.state.iteration + 1) / len(self._data_loader) + lr_adj = 1.0 / self._cluster_size * (epoch * (self._cluster_size - 1) / self._warmup_epochs + 1) + elif epoch < 30: + lr_adj = 1.0 + elif epoch < 60: + lr_adj = 1e-1 + elif epoch < 80: + lr_adj = 1e-2 + else: + lr_adj = 1e-3 + for param_group in self._optimizer.param_groups: + param_group["lr"] = self._base_lr * self._cluster_size * self._batches_per_allreduce * lr_adj + self._logger.debug(f"Adjust learning rate {param_group['lr']}") diff --git a/cv_lib/cv_lib/event_handlers/tensorboard_handlers.py b/cv_lib/cv_lib/event_handlers/tensorboard_handlers.py new file mode 100644 index 00000000..654c9b4d --- /dev/null +++ b/cv_lib/cv_lib/event_handlers/tensorboard_handlers.py @@ -0,0 +1,69 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from toolz import curry +import torchvision +import logging +import logging.config + +try: + from tensorboardX import SummaryWriter +except ImportError: + raise RuntimeError("No tensorboardX package is found. Please install with the command: \npip install tensorboardX") + + +def create_summary_writer(log_dir): + writer = SummaryWriter(logdir=log_dir) + return writer + + +def _log_model_output(log_label, summary_writer, engine): + summary_writer.add_scalar(log_label, engine.state.output["loss"], engine.state.iteration) + + +@curry +def log_training_output(summary_writer, engine): + _log_model_output("training/loss", summary_writer, engine) + + +@curry +def log_validation_output(summary_writer, engine): + _log_model_output("validation/loss", summary_writer, engine) + + +@curry +def log_lr(summary_writer, optimizer, log_interval, engine): + """[summary] + + Args: + optimizer ([type]): [description] + log_interval ([type]): iteration or epoch + summary_writer ([type]): [description] + engine ([type]): [description] + """ + lr = [param_group["lr"] for param_group in optimizer.param_groups] + summary_writer.add_scalar("lr", lr[0], getattr(engine.state, log_interval)) + + +_DEFAULT_METRICS = {"accuracy": "Avg accuracy :", "nll": "Avg loss :"} + + +@curry +def log_metrics(summary_writer, train_engine, log_interval, engine, metrics_dict=_DEFAULT_METRICS): + metrics = engine.state.metrics + for m in metrics_dict: + summary_writer.add_scalar(metrics_dict[m], metrics[m], getattr(train_engine.state, log_interval)) + + +def create_image_writer(summary_writer, label, output_variable, normalize=False, transform_func=lambda x: x): + logger = logging.getLogger(__name__) + + def write_to(engine): + try: + data_tensor = transform_func(engine.state.output[output_variable]) + image_grid = torchvision.utils.make_grid(data_tensor, normalize=normalize, scale_each=True) + summary_writer.add_image(label, image_grid, engine.state.epoch) + except KeyError: + logger.warning("Predictions 
and or ground truth labels not available to report") + + return write_to diff --git a/cv_lib/cv_lib/segmentation/__init__.py b/cv_lib/cv_lib/segmentation/__init__.py new file mode 100644 index 00000000..4306a4e0 --- /dev/null +++ b/cv_lib/cv_lib/segmentation/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from toolz import curry +import torch.nn.functional as F + + +@curry +def extract_metric_from(metric, engine): + metrics = engine.state.metrics + return metrics[metric] + + +@curry +def padded_val_transform(pad_left, fine_size, x, y, y_pred): + y_pred = y_pred[:, :, pad_left : pad_left + fine_size, pad_left : pad_left + fine_size].contiguous() + return {"image": x, "y_pred": F.sigmoid(y_pred).detach(), "mask": y.detach()} diff --git a/cv_lib/cv_lib/segmentation/dutchf3/augmentations.py b/cv_lib/cv_lib/segmentation/dutchf3/augmentations.py new file mode 100644 index 00000000..e4df608f --- /dev/null +++ b/cv_lib/cv_lib/segmentation/dutchf3/augmentations.py @@ -0,0 +1,221 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import math +import numbers +import random +import numpy as np + +from PIL import Image, ImageOps + + +class Compose(object): + def __init__(self, augmentations): + self.augmentations = augmentations + + def __call__(self, img, mask): + + img, mask = Image.fromarray(img, mode=None), Image.fromarray(mask, mode="L") + assert img.size == mask.size + + for a in self.augmentations: + img, mask = a(img, mask) + return np.array(img), np.array(mask, dtype=np.uint8) + + +class AddNoise(object): + def __call__(self, img, mask): + noise = np.random.normal(loc=0, scale=0.02, size=(img.size[1], img.size[0])) + return img + noise, mask + + +class RandomCrop(object): + def __init__(self, size, padding=0): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size + self.padding = padding + + def __call__(self, img, mask): + if self.padding > 0: + img = ImageOps.expand(img, border=self.padding, fill=0) + mask = ImageOps.expand(mask, border=self.padding, fill=0) + + assert img.size == mask.size + w, h = img.size + th, tw = self.size + if w == tw and h == th: + return img, mask + if w < tw or h < th: + return ( + img.resize((tw, th), Image.BILINEAR), + mask.resize((tw, th), Image.NEAREST), + ) + + x1 = random.randint(0, w - tw) + y1 = random.randint(0, h - th) + return ( + img.crop((x1, y1, x1 + tw, y1 + th)), + mask.crop((x1, y1, x1 + tw, y1 + th)), + ) + + +class CenterCrop(object): + def __init__(self, size): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size + + def __call__(self, img, mask): + assert img.size == mask.size + w, h = img.size + th, tw = self.size + x1 = int(round((w - tw) / 2.0)) + y1 = int(round((h - th) / 2.0)) + return ( + img.crop((x1, y1, x1 + tw, y1 + th)), + mask.crop((x1, y1, x1 + tw, y1 + th)), + ) + + +class RandomHorizontallyFlip(object): + def __call__(self, img, mask): + if random.random() < 0.5: + # Note: we use FLIP_TOP_BOTTOM here intentionaly. Due to the dimensions of the image, + # it ends up being a horizontal flip. 
+ return ( + img.transpose(Image.FLIP_TOP_BOTTOM), + mask.transpose(Image.FLIP_TOP_BOTTOM), + ) + return img, mask + + +class RandomVerticallyFlip(object): + def __call__(self, img, mask): + if random.random() < 0.5: + return ( + img.transpose(Image.FLIP_LEFT_RIGHT), + mask.transpose(Image.FLIP_LEFT_RIGHT), + ) + return img, mask + + +class FreeScale(object): + def __init__(self, size): + self.size = tuple(reversed(size)) # size: (h, w) + + def __call__(self, img, mask): + assert img.size == mask.size + return ( + img.resize(self.size, Image.BILINEAR), + mask.resize(self.size, Image.NEAREST), + ) + + +class Scale(object): + def __init__(self, size): + self.size = size + + def __call__(self, img, mask): + assert img.size == mask.size + w, h = img.size + if (w >= h and w == self.size) or (h >= w and h == self.size): + return img, mask + if w > h: + ow = self.size + oh = int(self.size * h / w) + return ( + img.resize((ow, oh), Image.BILINEAR), + mask.resize((ow, oh), Image.NEAREST), + ) + else: + oh = self.size + ow = int(self.size * w / h) + return ( + img.resize((ow, oh), Image.BILINEAR), + mask.resize((ow, oh), Image.NEAREST), + ) + + +class RandomSizedCrop(object): + def __init__(self, size): + self.size = size + + def __call__(self, img, mask): + assert img.size == mask.size + for attempt in range(10): + area = img.size[0] * img.size[1] + target_area = random.uniform(0.45, 1.0) * area + aspect_ratio = random.uniform(0.5, 2) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if random.random() < 0.5: + w, h = h, w + + if w <= img.size[0] and h <= img.size[1]: + x1 = random.randint(0, img.size[0] - w) + y1 = random.randint(0, img.size[1] - h) + + img = img.crop((x1, y1, x1 + w, y1 + h)) + mask = mask.crop((x1, y1, x1 + w, y1 + h)) + assert img.size == (w, h) + + return ( + img.resize((self.size, self.size), Image.BILINEAR), + mask.resize((self.size, self.size), Image.NEAREST), + ) + + # Fallback + scale = Scale(self.size) + crop = CenterCrop(self.size) + return crop(*scale(img, mask)) + + +class RandomRotate(object): + def __init__(self, degree): + self.degree = degree + + def __call__(self, img, mask): + """ + PIL automatically adds zeros to the borders of images that rotated. To fix this + issue, the code in the botton sets anywhere in the labels (mask) that is zero to + 255 (the value used for ignore_index). 
+ """ + rotate_degree = random.random() * 2 * self.degree - self.degree + + img = img.rotate(rotate_degree, Image.BILINEAR) + mask = mask.rotate(rotate_degree, Image.NEAREST) + + binary_mask = Image.fromarray(np.ones([mask.size[1], mask.size[0]])) + binary_mask = binary_mask.rotate(rotate_degree, Image.NEAREST) + binary_mask = np.array(binary_mask) + + mask_arr = np.array(mask) + mask_arr[binary_mask == 0] = 255 + mask = Image.fromarray(mask_arr) + + return img, mask + + +class RandomSized(object): + def __init__(self, size): + self.size = size + self.scale = Scale(self.size) + self.crop = RandomCrop(self.size) + + def __call__(self, img, mask): + assert img.size == mask.size + + w = int(random.uniform(0.5, 2) * img.size[0]) + h = int(random.uniform(0.5, 2) * img.size[1]) + + img, mask = ( + img.resize((w, h), Image.BILINEAR), + mask.resize((w, h), Image.NEAREST), + ) + + return self.crop(*self.scale(img, mask)) diff --git a/cv_lib/cv_lib/segmentation/dutchf3/engine.py b/cv_lib/cv_lib/segmentation/dutchf3/engine.py new file mode 100644 index 00000000..c137af5c --- /dev/null +++ b/cv_lib/cv_lib/segmentation/dutchf3/engine.py @@ -0,0 +1,130 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import torch + +from ignite.engine.engine import Engine, State, Events +from ignite.utils import convert_tensor +import torch.nn.functional as F +from toolz import curry +from torch.nn import functional as F +import numpy as np + + +def _upscale_model_output(y_pred, y): + ph, pw = y_pred.size(2), y_pred.size(3) + h, w = y.size(2), y.size(3) + if ph != h or pw != w: + y_pred = F.upsample(input=y_pred, size=(h, w), mode="bilinear") + return y_pred + + +def create_supervised_trainer( + model, + optimizer, + loss_fn, + prepare_batch, + device=None, + non_blocking=False, + output_transform=lambda x, y, y_pred, loss: {"loss": loss.item()}, +): + if device: + model.to(device) + + def _update(engine, batch): + model.train() + optimizer.zero_grad() + x, y = prepare_batch(batch, device=device, non_blocking=non_blocking) + y_pred = model(x) + y_pred = _upscale_model_output(y_pred, y) + loss = loss_fn(y_pred.squeeze(1), y.squeeze(1)) + loss.backward() + optimizer.step() + return output_transform(x, y, y_pred, loss) + + return Engine(_update) + + +@curry +def val_transform(x, y, y_pred): + return {"image": x, "y_pred": y_pred.detach(), "mask": y.detach()} + + +def create_supervised_evaluator( + model, prepare_batch, metrics=None, device=None, non_blocking=False, output_transform=val_transform, +): + metrics = metrics or {} + + if device: + model.to(device) + + def _inference(engine, batch): + model.eval() + with torch.no_grad(): + x, y = prepare_batch(batch, device=device, non_blocking=non_blocking) + y_pred = model(x) + y_pred = _upscale_model_output(y_pred, x) + return output_transform(x, y, y_pred) + + engine = Engine(_inference) + + for name, metric in metrics.items(): + metric.attach(engine, name) + + return engine + + +def create_supervised_trainer_apex( + model, + optimizer, + loss_fn, + prepare_batch, + device=None, + non_blocking=False, + output_transform=lambda x, y, y_pred, loss: {"loss": loss.item()}, +): + from apex import amp + + if device: + model.to(device) + + def _update(engine, batch): + model.train() + optimizer.zero_grad() + x, y = prepare_batch(batch, device=device, non_blocking=non_blocking) + y_pred = model(x) + loss = loss_fn(y_pred.squeeze(1), y.squeeze(1)) + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + optimizer.step() + return 
output_transform(x, y, y_pred, loss) + + return Engine(_update) + + +# def create_supervised_evaluator_apex( +# model, +# prepare_batch, +# metrics=None, +# device=None, +# non_blocking=False, +# output_transform=lambda x, y, y_pred: (x, y, pred), +# ): +# metrics = metrics or {} + +# if device: +# model.to(device) + +# def _inference(engine, batch): +# model.eval() +# with torch.no_grad(): +# x, y = prepare_batch(batch, device=device, non_blocking=non_blocking) +# y_pred = model(x) +# return output_transform(x, y, y_pred) + +# engine = Engine(_inference) + +# for name, metric in metrics.items(): +# metric.attach(engine, name) + +# return engine diff --git a/cv_lib/cv_lib/segmentation/dutchf3/utils.py b/cv_lib/cv_lib/segmentation/dutchf3/utils.py new file mode 100644 index 00000000..adad1e97 --- /dev/null +++ b/cv_lib/cv_lib/segmentation/dutchf3/utils.py @@ -0,0 +1,46 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import numpy as np +import torch +from git import Repo +from datetime import datetime +import os + + +def np_to_tb(array): + # if 2D : + if array.ndim == 2: + # HW => CHW + array = np.expand_dims(array, axis=0) + # CHW => NCHW + array = np.expand_dims(array, axis=0) + elif array.ndim == 3: + # HWC => CHW + array = array.transpose(2, 0, 1) + # CHW => NCHW + array = np.expand_dims(array, axis=0) + + array = torch.from_numpy(array) + return array + + +def current_datetime(): + return datetime.now().strftime("%b%d_%H%M%S") + + +def git_branch(): + repo = Repo(search_parent_directories=True) + return repo.active_branch.name + + +def git_hash(): + repo = Repo(search_parent_directories=True) + return repo.active_branch.commit.hexsha + + +def generate_path(base_path, *directories): + path = os.path.join(base_path, *directories) + if not os.path.exists(path): + os.makedirs(path) + return path diff --git a/cv_lib/cv_lib/segmentation/metrics.py b/cv_lib/cv_lib/segmentation/metrics.py new file mode 100644 index 00000000..2d28a954 --- /dev/null +++ b/cv_lib/cv_lib/segmentation/metrics.py @@ -0,0 +1,94 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import torch +import ignite + + +def pixelwise_accuracy(num_classes, output_transform=lambda x: x, device=None): + """Calculates class accuracy + + Args: + num_classes (int): number of classes + output_transform (callable, optional): a callable that is used to transform the + output into the form expected by the metric. + + Returns: + MetricsLambda + + """ + cm = ignite.metrics.ConfusionMatrix(num_classes=num_classes, output_transform=output_transform, device=device) + # Increase floating point precision and pass to CPU + cm = cm.type(torch.DoubleTensor) + + pix_cls = ignite.metrics.confusion_matrix.cmAccuracy(cm) + + return pix_cls + + +def class_accuracy(num_classes, output_transform=lambda x: x, device=None): + """Calculates class accuracy + + Args: + num_classes (int): number of classes + output_transform (callable, optional): a callable that is used to transform the + output into the form expected by the metric. 
+
+    Returns:
+        MetricsLambda
+
+    """
+    cm = ignite.metrics.ConfusionMatrix(num_classes=num_classes, output_transform=output_transform, device=device)
+    # Increase floating point precision and pass to CPU
+    cm = cm.type(torch.DoubleTensor)
+
+    acc_cls = cm.diag() / (cm.sum(dim=1) + 1e-15)
+
+    return acc_cls
+
+
+def mean_class_accuracy(num_classes, output_transform=lambda x: x, device=None):
+    """Calculates mean class accuracy
+
+    Args:
+        num_classes (int): number of classes
+        output_transform (callable, optional): a callable that is used to transform the
+            output into the form expected by the metric.
+
+    Returns:
+        MetricsLambda
+
+    """
+    return class_accuracy(num_classes=num_classes, output_transform=output_transform, device=device).mean()
+
+
+def class_iou(num_classes, output_transform=lambda x: x, device=None, ignore_index=None):
+    """Calculates per-class intersection-over-union
+
+    Args:
+        num_classes (int): number of classes
+        output_transform (callable, optional): a callable that is used to transform the
+            output into the form expected by the metric.
+        ignore_index (int, optional): class index to exclude from the metric
+
+    Returns:
+        MetricsLambda
+
+    """
+    cm = ignite.metrics.ConfusionMatrix(num_classes=num_classes, output_transform=output_transform, device=device)
+    return ignite.metrics.IoU(cm, ignore_index=ignore_index)
+
+
+def mean_iou(num_classes, output_transform=lambda x: x, device=None, ignore_index=None):
+    """Calculates mean intersection-over-union
+
+    Args:
+        num_classes (int): number of classes
+        output_transform (callable, optional): a callable that is used to transform the
+            output into the form expected by the metric.
+        ignore_index (int, optional): class index to exclude from the metric
+
+    Returns:
+        MetricsLambda
+
+    """
+    cm = ignite.metrics.ConfusionMatrix(num_classes=num_classes, output_transform=output_transform, device=device)
+    return ignite.metrics.mIoU(cm, ignore_index=ignore_index)
diff --git a/cv_lib/cv_lib/segmentation/models/__init__.py b/cv_lib/cv_lib/segmentation/models/__init__.py
new file mode 100644
index 00000000..11d443e4
--- /dev/null
+++ b/cv_lib/cv_lib/segmentation/models/__init__.py
@@ -0,0 +1,10 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+import cv_lib.segmentation.models.seg_hrnet  # noqa: F401
+import cv_lib.segmentation.models.resnet_unet  # noqa: F401
+import cv_lib.segmentation.models.unet  # noqa: F401
+import cv_lib.segmentation.models.section_deconvnet  # noqa: F401
+import cv_lib.segmentation.models.patch_deconvnet  # noqa: F401
+import cv_lib.segmentation.models.patch_deconvnet_skip  # noqa: F401
+import cv_lib.segmentation.models.section_deconvnet_skip  # noqa: F401
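These factories return ignite `MetricsLambda` objects, so they can be passed straight into the `metrics` dict of `create_supervised_evaluator` from `engine.py`. A sketch under assumptions: the model, `prepare_batch` and `val_loader` are defined elsewhere, and the `unpack` helper below (an assumption, not part of this repository) turns the dict produced by `val_transform` into the `(y_pred, y)` pair that `ConfusionMatrix` expects; the exact squeeze depends on the mask's shape:

    from cv_lib.segmentation.dutchf3.engine import create_supervised_evaluator
    from cv_lib.segmentation.metrics import mean_iou, pixelwise_accuracy

    n_classes = 6                                 # illustrative; take this from the experiment config

    def unpack(output):
        # val_transform emits {"image": ..., "y_pred": ..., "mask": ...}
        return output["y_pred"], output["mask"].squeeze(1)

    evaluator = create_supervised_evaluator(
        model,                                    # assumed defined
        prepare_batch,                            # assumed defined
        metrics={
            "pixacc": pixelwise_accuracy(n_classes, output_transform=unpack),
            "mIoU": mean_iou(n_classes, output_transform=unpack),
        },
        device="cuda",
    )
    evaluator.run(val_loader)                     # val_loader assumed defined
    print(evaluator.state.metrics["mIoU"])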
diff --git a/cv_lib/cv_lib/segmentation/models/patch_deconvnet.py b/cv_lib/cv_lib/segmentation/models/patch_deconvnet.py
new file mode 100644
index 00000000..4ee1ed59
--- /dev/null
+++ b/cv_lib/cv_lib/segmentation/models/patch_deconvnet.py
@@ -0,0 +1,308 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+import torch.nn as nn
+
+
+class patch_deconvnet(nn.Module):
+    def __init__(self, n_classes=4, learned_billinear=False):
+        super(patch_deconvnet, self).__init__()
+        self.learned_billinear = learned_billinear
+        self.n_classes = n_classes
+        self.unpool = nn.MaxUnpool2d(2, stride=2)
+        self.conv_block1 = nn.Sequential(
+            # conv1_1
+            nn.Conv2d(1, 64, 3, padding=1),
+            nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+            # conv1_2
+            nn.Conv2d(64, 64, 3, padding=1),
+            nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+            # pool1
+            nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True),
+        )
+        # returns the block output and pool_indices_1
+
+        # 48*48
+
+        self.conv_block2 = nn.Sequential(
+            # conv2_1
+            nn.Conv2d(64, 128, 3, padding=1),
+            nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+            # conv2_2
+            nn.Conv2d(128, 128, 3, padding=1),
+            nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+            # pool2
+            nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True),
+        )
+        # returns the block output and pool_indices_2
+
+        # 24*24
+
+        self.conv_block3 = nn.Sequential(
+            # conv3_1
+            nn.Conv2d(128, 256, 3, padding=1),
+            nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+            # conv3_2
+            nn.Conv2d(256, 256, 3, padding=1),
+            nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+            # conv3_3
+            nn.Conv2d(256, 256, 3, padding=1),
+            nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+            # pool3
+            nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True),
+        )
+        # returns the block output and pool_indices_3
+
+        # 12*12
+
+        self.conv_block4 = nn.Sequential(
+            # conv4_1
+            nn.Conv2d(256, 512, 3, padding=1),
+            nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+            # conv4_2
+            nn.Conv2d(512, 512, 3, padding=1),
+            nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+            # conv4_3
+            nn.Conv2d(512, 512, 3, padding=1),
+            nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+            # pool4
+            nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True),
+        )
+        # returns the block output and pool_indices_4
+
+        # 6*6
+
+        self.conv_block5 = nn.Sequential(
+            # conv5_1
+            nn.Conv2d(512, 512, 3, padding=1),
+            nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+            # conv5_2
+            nn.Conv2d(512, 512, 3, padding=1),
+            nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+            # conv5_3
+            nn.Conv2d(512, 512, 3, padding=1),
+            nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+            # pool5
+            nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True),
+        )
+        # returns the block output and pool_indices_5
+
+        # 3*3
+
+        self.conv_block6 = nn.Sequential(
+            # fc6
+            nn.Conv2d(512, 4096, 3),
+            # filter size 3 with no padding collapses the 3*3 map to 1*1
+            nn.BatchNorm2d(4096, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+        )
+
+        # 1*1
+
+        self.conv_block7 = nn.Sequential(
+            # fc7
+            nn.Conv2d(4096, 4096, 1),
+            # 1x1 filter keeps the output at 1*1
+            nn.BatchNorm2d(4096, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+        )
+
+        self.deconv_block8 = nn.Sequential(
+            # fc6-deconv
+            nn.ConvTranspose2d(4096, 512, 3, stride=1),
+            nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True),
+            nn.ReLU(inplace=True),
+        )
+
+        # 3*3
+
+
self.unpool_block9 = nn.Sequential( + # unpool5 + nn.MaxUnpool2d(2, stride=2), + ) + # usage unpool(output, indices) + + # 6*6 + + self.deconv_block10 = nn.Sequential( + # deconv5_1 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv5_2 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv5_3 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block11 = nn.Sequential( + # unpool4 + nn.MaxUnpool2d(2, stride=2), + ) + + # 12*12 + + self.deconv_block12 = nn.Sequential( + # deconv4_1 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv4_2 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv4_3 + nn.ConvTranspose2d(512, 256, 3, stride=1, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block13 = nn.Sequential( + # unpool3 + nn.MaxUnpool2d(2, stride=2), + ) + + # 24*24 + + self.deconv_block14 = nn.Sequential( + # deconv3_1 + nn.ConvTranspose2d(256, 256, 3, stride=1, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv3_2 + nn.ConvTranspose2d(256, 256, 3, stride=1, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv3_3 + nn.ConvTranspose2d(256, 128, 3, stride=1, padding=1), + nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block15 = nn.Sequential( + # unpool2 + nn.MaxUnpool2d(2, stride=2), + ) + + # 48*48 + + self.deconv_block16 = nn.Sequential( + # deconv2_1 + nn.ConvTranspose2d(128, 128, 3, stride=1, padding=1), + nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv2_2 + nn.ConvTranspose2d(128, 64, 3, stride=1, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block17 = nn.Sequential( + # unpool1 + nn.MaxUnpool2d(2, stride=2), + ) + + # 96*96 + + self.deconv_block18 = nn.Sequential( + # deconv1_1 + nn.ConvTranspose2d(64, 64, 3, stride=1, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv1_2 + nn.ConvTranspose2d(64, 64, 3, stride=1, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.seg_score19 = nn.Sequential( + # seg-score + nn.Conv2d(64, self.n_classes, 1), + ) + + if self.learned_billinear: + raise NotImplementedError + + def forward(self, x): + size0 = x.size() + conv1, indices1 = self.conv_block1(x) + size1 = conv1.size() + conv2, indices2 = self.conv_block2(conv1) + size2 = conv2.size() + conv3, indices3 = self.conv_block3(conv2) + size3 = conv3.size() + conv4, indices4 = self.conv_block4(conv3) + size4 = conv4.size() + conv5, indices5 = self.conv_block5(conv4) + + conv6 = self.conv_block6(conv5) + conv7 = self.conv_block7(conv6) + conv8 = self.deconv_block8(conv7) + conv9 = self.unpool(conv8, indices5, output_size=size4) + conv10 = self.deconv_block10(conv9) + conv11 = self.unpool(conv10, indices4, output_size=size3) + conv12 = 
self.deconv_block12(conv11) + conv13 = self.unpool(conv12, indices3, output_size=size2) + conv14 = self.deconv_block14(conv13) + conv15 = self.unpool(conv14, indices2, output_size=size1) + conv16 = self.deconv_block16(conv15) + conv17 = self.unpool(conv16, indices1, output_size=size0) + conv18 = self.deconv_block18(conv17) + out = self.seg_score19(conv18) + + return out + + def init_vgg16_params(self, vgg16, copy_fc8=True): + blocks = [ + self.conv_block1, + self.conv_block2, + self.conv_block3, + self.conv_block4, + self.conv_block5, + ] + + ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]] + features = list(vgg16.features.children()) + i_layer = 0 + # copy convolutional filters from vgg16 + for idx, conv_block in enumerate(blocks): + for l1, l2 in zip(features[ranges[idx][0] : ranges[idx][1]], conv_block): + if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d): + if i_layer == 0: + l2.weight.data = ( + (l1.weight.data[:, 0, :, :] + l1.weight.data[:, 1, :, :] + l1.weight.data[:, 2, :, :]) / 3.0 + ).view(l2.weight.size()) + l2.bias.data = l1.bias.data + i_layer = i_layer + 1 + else: + assert l1.weight.size() == l2.weight.size() + assert l1.bias.size() == l2.bias.size() + l2.weight.data = l1.weight.data + l2.bias.data = l1.bias.data + i_layer = i_layer + 1 + + +def get_seg_model(cfg, **kwargs): + assert ( + cfg.MODEL.IN_CHANNELS == 1 + ), f"Patch deconvnet is not implemented to accept {cfg.MODEL.IN_CHANNELS} channels. Please only pass 1 for cfg.MODEL.IN_CHANNELS" + model = patch_deconvnet(n_classes=cfg.DATASET.NUM_CLASSES) + + return model diff --git a/cv_lib/cv_lib/segmentation/models/patch_deconvnet_skip.py b/cv_lib/cv_lib/segmentation/models/patch_deconvnet_skip.py new file mode 100644 index 00000000..d5506b84 --- /dev/null +++ b/cv_lib/cv_lib/segmentation/models/patch_deconvnet_skip.py @@ -0,0 +1,307 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
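The spatial-size comments threaded through `patch_deconvnet` (96*96 down to 1*1) assume a 96-pixel input patch: five ceil-mode poolings halve 96 down to 3, and the valid 3x3 convolution in `conv_block6` collapses that to 1*1 before the decoder mirrors the path back up. A quick shape check, as a sketch (the 96 patch size is inferred from those comments rather than from any config in this diff):

    import torch
    from cv_lib.segmentation.models.patch_deconvnet import patch_deconvnet

    model = patch_deconvnet(n_classes=4).eval()
    x = torch.rand(2, 1, 96, 96)    # batch of two single-channel patches
    with torch.no_grad():
        out = model(x)
    print(out.shape)                # expected: torch.Size([2, 4, 96, 96])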
+ +import torch.nn as nn + + +class patch_deconvnet_skip(nn.Module): + def __init__(self, n_classes=4, learned_billinear=False): + super(patch_deconvnet_skip, self).__init__() + self.learned_billinear = learned_billinear + self.n_classes = n_classes + self.unpool = nn.MaxUnpool2d(2, stride=2) + self.conv_block1 = nn.Sequential( + # conv1_1 + nn.Conv2d(1, 64, 3, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv1_2 + nn.Conv2d(64, 64, 3, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # pool1 + nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True), + ) + # it returns outputs and pool_indices_1 + + # 48*48 + + self.conv_block2 = nn.Sequential( + # conv2_1 + nn.Conv2d(64, 128, 3, padding=1), + nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv2_2 + nn.Conv2d(128, 128, 3, padding=1), + nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # pool2 + nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True), + ) + # it returns outputs and pool_indices_2 + + # 24*24 + + self.conv_block3 = nn.Sequential( + # conv3_1 + nn.Conv2d(128, 256, 3, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv3_2 + nn.Conv2d(256, 256, 3, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv3_3 + nn.Conv2d(256, 256, 3, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # pool3 + nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True), + ) + # it returns outputs and pool_indices_3 + + # 12*12 + + self.conv_block4 = nn.Sequential( + # conv4_1 + nn.Conv2d(256, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv4_2 + nn.Conv2d(512, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv4_3 + nn.Conv2d(512, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # pool4 + nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True), + ) + # it returns outputs and pool_indices_4 + + # 6*6 + + self.conv_block5 = nn.Sequential( + # conv5_1 + nn.Conv2d(512, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv5_2 + nn.Conv2d(512, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv5_3 + nn.Conv2d(512, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # pool5 + nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True), + ) + # it returns outputs and pool_indices_5 + + # 3*3 + + self.conv_block6 = nn.Sequential( + # fc6 + nn.Conv2d(512, 4096, 3), + # set the filter size and nor padding to make output into 1*1 + nn.BatchNorm2d(4096, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + # 1*1 + + self.conv_block7 = nn.Sequential( + # fc7 + nn.Conv2d(4096, 4096, 1), + # set the filter size to make output into 1*1 + nn.BatchNorm2d(4096, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.deconv_block8 = nn.Sequential( + # fc6-deconv + nn.ConvTranspose2d(4096, 512, 3, stride=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + # 3*3 + + 
self.unpool_block9 = nn.Sequential( + # unpool5 + nn.MaxUnpool2d(2, stride=2), + ) + # usage unpool(output, indices) + + # 6*6 + + self.deconv_block10 = nn.Sequential( + # deconv5_1 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv5_2 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv5_3 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block11 = nn.Sequential( + # unpool4 + nn.MaxUnpool2d(2, stride=2), + ) + + # 12*12 + + self.deconv_block12 = nn.Sequential( + # deconv4_1 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv4_2 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv4_3 + nn.ConvTranspose2d(512, 256, 3, stride=1, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block13 = nn.Sequential( + # unpool3 + nn.MaxUnpool2d(2, stride=2), + ) + + # 24*24 + + self.deconv_block14 = nn.Sequential( + # deconv3_1 + nn.ConvTranspose2d(256, 256, 3, stride=1, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv3_2 + nn.ConvTranspose2d(256, 256, 3, stride=1, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv3_3 + nn.ConvTranspose2d(256, 128, 3, stride=1, padding=1), + nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block15 = nn.Sequential( + # unpool2 + nn.MaxUnpool2d(2, stride=2), + ) + + # 48*48 + + self.deconv_block16 = nn.Sequential( + # deconv2_1 + nn.ConvTranspose2d(128, 128, 3, stride=1, padding=1), + nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv2_2 + nn.ConvTranspose2d(128, 64, 3, stride=1, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block17 = nn.Sequential( + # unpool1 + nn.MaxUnpool2d(2, stride=2), + ) + + # 96*96 + + self.deconv_block18 = nn.Sequential( + # deconv1_1 + nn.ConvTranspose2d(64, 64, 3, stride=1, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv1_2 + nn.ConvTranspose2d(64, 64, 3, stride=1, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.seg_score19 = nn.Sequential( + # seg-score + nn.Conv2d(64, self.n_classes, 1), + ) + + if self.learned_billinear: + raise NotImplementedError + + def forward(self, x): + size0 = x.size() + conv1, indices1 = self.conv_block1(x) + size1 = conv1.size() + conv2, indices2 = self.conv_block2(conv1) + size2 = conv2.size() + conv3, indices3 = self.conv_block3(conv2) + size3 = conv3.size() + conv4, indices4 = self.conv_block4(conv3) + size4 = conv4.size() + conv5, indices5 = self.conv_block5(conv4) + + conv6 = self.conv_block6(conv5) + conv7 = self.conv_block7(conv6) + conv8 = self.deconv_block8(conv7) + conv5 + conv9 = self.unpool(conv8, indices5, output_size=size4) + conv10 = self.deconv_block10(conv9) + conv4 + conv11 = self.unpool(conv10, indices4, output_size=size3) 
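+        # the remaining decoder stages repeat the pattern above: deconv block,
+        # add the matching encoder feature map (skip connection), then unpool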
+ conv12 = self.deconv_block12(conv11) + conv3 + conv13 = self.unpool(conv12, indices3, output_size=size2) + conv14 = self.deconv_block14(conv13) + conv2 + conv15 = self.unpool(conv14, indices2, output_size=size1) + conv16 = self.deconv_block16(conv15) + conv1 + conv17 = self.unpool(conv16, indices1, output_size=size0) + conv18 = self.deconv_block18(conv17) + out = self.seg_score19(conv18) + + return out + + def init_vgg16_params(self, vgg16, copy_fc8=True): + blocks = [ + self.conv_block1, + self.conv_block2, + self.conv_block3, + self.conv_block4, + self.conv_block5, + ] + + ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]] + features = list(vgg16.features.children()) + i_layer = 0 + # copy convolutional filters from vgg16 + for idx, conv_block in enumerate(blocks): + for l1, l2 in zip(features[ranges[idx][0] : ranges[idx][1]], conv_block): + if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d): + if i_layer == 0: + l2.weight.data = ( + (l1.weight.data[:, 0, :, :] + l1.weight.data[:, 1, :, :] + l1.weight.data[:, 2, :, :]) / 3.0 + ).view(l2.weight.size()) + l2.bias.data = l1.bias.data + i_layer = i_layer + 1 + else: + assert l1.weight.size() == l2.weight.size() + assert l1.bias.size() == l2.bias.size() + l2.weight.data = l1.weight.data + l2.bias.data = l1.bias.data + i_layer = i_layer + 1 + + +def get_seg_model(cfg, **kwargs): + assert ( + cfg.MODEL.IN_CHANNELS == 1 + ), f"Patch deconvnet is not implemented to accept {cfg.MODEL.IN_CHANNELS} channels. Please only pass 1 for cfg.MODEL.IN_CHANNELS" + model = patch_deconvnet_skip(n_classes=cfg.DATASET.NUM_CLASSES) + return model diff --git a/cv_lib/cv_lib/segmentation/models/resnet_unet.py b/cv_lib/cv_lib/segmentation/models/resnet_unet.py new file mode 100644 index 00000000..05badb64 --- /dev/null +++ b/cv_lib/cv_lib/segmentation/models/resnet_unet.py @@ -0,0 +1,365 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
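`init_vgg16_params` copies VGG16's convolutional filters block by block; because the first deconvnet convolution takes a single channel rather than RGB, the first filter bank is averaged across the three input channels (the `i_layer == 0` branch above). A usage sketch, assuming torchvision's ImageNet-pretrained weights are acceptable as an initialization for seismic data:

    import torchvision
    from cv_lib.segmentation.models.patch_deconvnet_skip import patch_deconvnet_skip

    model = patch_deconvnet_skip(n_classes=4)
    vgg16 = torchvision.models.vgg16(pretrained=True)
    model.init_vgg16_params(vgg16)    # note: copy_fc8 is accepted but currently unused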
+ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision + + +class FPAv2(nn.Module): + def __init__(self, input_dim, output_dim): + super(FPAv2, self).__init__() + self.glob = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Conv2d(input_dim, output_dim, kernel_size=1, bias=False),) + + self.down2_1 = nn.Sequential( + nn.Conv2d(input_dim, input_dim, kernel_size=5, stride=2, padding=2, bias=False), + nn.BatchNorm2d(input_dim), + nn.ELU(True), + ) + self.down2_2 = nn.Sequential( + nn.Conv2d(input_dim, output_dim, kernel_size=5, padding=2, bias=False), + nn.BatchNorm2d(output_dim), + nn.ELU(True), + ) + + self.down3_1 = nn.Sequential( + nn.Conv2d(input_dim, input_dim, kernel_size=3, stride=2, padding=1, bias=False), + nn.BatchNorm2d(input_dim), + nn.ELU(True), + ) + self.down3_2 = nn.Sequential( + nn.Conv2d(input_dim, output_dim, kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(output_dim), + nn.ELU(True), + ) + + self.conv1 = nn.Sequential( + nn.Conv2d(input_dim, output_dim, kernel_size=1, bias=False), nn.BatchNorm2d(output_dim), nn.ELU(True), + ) + + def forward(self, x): + # x shape: 512, 16, 16 + x_glob = self.glob(x) # 256, 1, 1 + x_glob = F.upsample(x_glob, scale_factor=16, mode="bilinear", align_corners=True) # 256, 16, 16 + + d2 = self.down2_1(x) # 512, 8, 8 + d3 = self.down3_1(d2) # 512, 4, 4 + + d2 = self.down2_2(d2) # 256, 8, 8 + d3 = self.down3_2(d3) # 256, 4, 4 + + d3 = F.upsample(d3, scale_factor=2, mode="bilinear", align_corners=True) # 256, 8, 8 + d2 = d2 + d3 + + d2 = F.upsample(d2, scale_factor=2, mode="bilinear", align_corners=True) # 256, 16, 16 + x = self.conv1(x) # 256, 16, 16 + x = x * d2 + + x = x + x_glob + + return x + + +def conv3x3(input_dim, output_dim, rate=1): + return nn.Sequential( + nn.Conv2d(input_dim, output_dim, kernel_size=3, dilation=rate, padding=rate, bias=False,), + nn.BatchNorm2d(output_dim), + nn.ELU(True), + ) + + +class SpatialAttention2d(nn.Module): + def __init__(self, channel): + super(SpatialAttention2d, self).__init__() + self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + z = self.squeeze(x) + z = self.sigmoid(z) + return x * z + + +class GAB(nn.Module): + def __init__(self, input_dim, reduction=4): + super(GAB, self).__init__() + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.conv1 = nn.Conv2d(input_dim, input_dim // reduction, kernel_size=1, stride=1) + self.conv2 = nn.Conv2d(input_dim // reduction, input_dim, kernel_size=1, stride=1) + self.relu = nn.ReLU(inplace=True) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + z = self.global_avgpool(x) + z = self.relu(self.conv1(z)) + z = self.sigmoid(self.conv2(z)) + return x * z + + +class Decoder(nn.Module): + def __init__(self, in_channels, channels, out_channels): + super(Decoder, self).__init__() + self.conv1 = conv3x3(in_channels, channels) + self.conv2 = conv3x3(channels, out_channels) + self.s_att = SpatialAttention2d(out_channels) + self.c_att = GAB(out_channels, 16) + + def forward(self, x, e=None): + x = F.upsample(input=x, scale_factor=2, mode="bilinear", align_corners=True) + if e is not None: + x = torch.cat([x, e], 1) + x = self.conv1(x) + x = self.conv2(x) + s = self.s_att(x) + c = self.c_att(x) + output = s + c + return output + + +class Decoderv2(nn.Module): + def __init__(self, up_in, x_in, n_out): + super(Decoderv2, self).__init__() + up_out = x_out = n_out // 2 + self.x_conv = nn.Conv2d(x_in, x_out, 1, bias=False) + self.tr_conv = nn.ConvTranspose2d(up_in, 
up_out, 2, stride=2) + self.bn = nn.BatchNorm2d(n_out) + self.relu = nn.ReLU(True) + self.s_att = SpatialAttention2d(n_out) + self.c_att = GAB(n_out, 16) + + def forward(self, up_p, x_p): + up_p = self.tr_conv(up_p) + x_p = self.x_conv(x_p) + + cat_p = torch.cat([up_p, x_p], 1) + cat_p = self.relu(self.bn(cat_p)) + s = self.s_att(cat_p) + c = self.c_att(cat_p) + return s + c + + +class SCse(nn.Module): + def __init__(self, dim): + super(SCse, self).__init__() + self.satt = SpatialAttention2d(dim) + self.catt = GAB(dim) + + def forward(self, x): + return self.satt(x) + self.catt(x) + + +# stage1 model +class Res34Unetv4(nn.Module): + def __init__(self, n_classes=1): + super(Res34Unetv4, self).__init__() + self.resnet = torchvision.models.resnet34(True) + + self.conv1 = nn.Sequential(self.resnet.conv1, self.resnet.bn1, self.resnet.relu) + + self.encode2 = nn.Sequential(self.resnet.layer1, SCse(64)) + self.encode3 = nn.Sequential(self.resnet.layer2, SCse(128)) + self.encode4 = nn.Sequential(self.resnet.layer3, SCse(256)) + self.encode5 = nn.Sequential(self.resnet.layer4, SCse(512)) + + self.center = nn.Sequential(FPAv2(512, 256), nn.MaxPool2d(2, 2)) + + self.decode5 = Decoderv2(256, 512, 64) + self.decode4 = Decoderv2(64, 256, 64) + self.decode3 = Decoderv2(64, 128, 64) + self.decode2 = Decoderv2(64, 64, 64) + self.decode1 = Decoder(64, 32, 64) + + self.logit = nn.Sequential( + nn.Conv2d(320, 64, kernel_size=3, padding=1), + nn.ELU(True), + nn.Conv2d(64, n_classes, kernel_size=1, bias=False), + ) + + def forward(self, x): + # x: (batch_size, 3, 256, 256) + + x = self.conv1(x) # 64, 128, 128 + e2 = self.encode2(x) # 64, 128, 128 + e3 = self.encode3(e2) # 128, 64, 64 + e4 = self.encode4(e3) # 256, 32, 32 + e5 = self.encode5(e4) # 512, 16, 16 + + f = self.center(e5) # 256, 8, 8 + + d5 = self.decode5(f, e5) # 64, 16, 16 + d4 = self.decode4(d5, e4) # 64, 32, 32 + d3 = self.decode3(d4, e3) # 64, 64, 64 + d2 = self.decode2(d3, e2) # 64, 128, 128 + d1 = self.decode1(d2) # 64, 256, 256 + + f = torch.cat( + ( + d1, + F.upsample(d2, scale_factor=2, mode="bilinear", align_corners=True), + F.upsample(d3, scale_factor=4, mode="bilinear", align_corners=True), + F.upsample(d4, scale_factor=8, mode="bilinear", align_corners=True), + F.upsample(d5, scale_factor=16, mode="bilinear", align_corners=True), + ), + 1, + ) # 320, 256, 256 + + logit = self.logit(f) # 1, 256, 256 + + return logit + + +# stage2 model +class Res34Unetv3(nn.Module): + def __init__(self): + super(Res34Unetv3, self).__init__() + self.resnet = torchvision.models.resnet34(True) + + self.conv1 = nn.Sequential(self.resnet.conv1, self.resnet.bn1, self.resnet.relu) + + self.encode2 = nn.Sequential(self.resnet.layer1, SCse(64)) + self.encode3 = nn.Sequential(self.resnet.layer2, SCse(128)) + self.encode4 = nn.Sequential(self.resnet.layer3, SCse(256)) + self.encode5 = nn.Sequential(self.resnet.layer4, SCse(512)) + + self.center = nn.Sequential(FPAv2(512, 256), nn.MaxPool2d(2, 2)) + + self.decode5 = Decoderv2(256, 512, 64) + self.decode4 = Decoderv2(64, 256, 64) + self.decode3 = Decoderv2(64, 128, 64) + self.decode2 = Decoderv2(64, 64, 64) + self.decode1 = Decoder(64, 32, 64) + + self.dropout2d = nn.Dropout2d(0.4) + self.dropout = nn.Dropout(0.4) + + self.fuse_pixel = conv3x3(320, 64) + self.logit_pixel = nn.Conv2d(64, 1, kernel_size=1, bias=False) + + self.fuse_image = nn.Sequential(nn.Linear(512, 64), nn.ELU(True)) + self.logit_image = nn.Sequential(nn.Linear(64, 1), nn.Sigmoid()) + self.logit = nn.Sequential( + nn.Conv2d(128, 64, kernel_size=3, 
padding=1, bias=False), + nn.ELU(True), + nn.Conv2d(64, 1, kernel_size=1, bias=False), + ) + + def forward(self, x): + # x: (batch_size, 3, 256, 256) + batch_size, c, h, w = x.shape + + x = self.conv1(x) # 64, 128, 128 + e2 = self.encode2(x) # 64, 128, 128 + e3 = self.encode3(e2) # 128, 64, 64 + e4 = self.encode4(e3) # 256, 32, 32 + e5 = self.encode5(e4) # 512, 16, 16 + + e = F.adaptive_avg_pool2d(e5, output_size=1).view(batch_size, -1) # 512 + e = self.dropout(e) + + f = self.center(e5) # 256, 8, 8 + + d5 = self.decode5(f, e5) # 64, 16, 16 + d4 = self.decode4(d5, e4) # 64, 32, 32 + d3 = self.decode3(d4, e3) # 64, 64, 64 + d2 = self.decode2(d3, e2) # 64, 128, 128 + d1 = self.decode1(d2) # 64, 256, 256 + + f = torch.cat( + ( + d1, + F.upsample(d2, scale_factor=2, mode="bilinear", align_corners=True), + F.upsample(d3, scale_factor=4, mode="bilinear", align_corners=True), + F.upsample(d4, scale_factor=8, mode="bilinear", align_corners=True), + F.upsample(d5, scale_factor=16, mode="bilinear", align_corners=True), + ), + 1, + ) # 320, 256, 256 + f = self.dropout2d(f) + + # segmentation process + fuse_pixel = self.fuse_pixel(f) # 64, 256, 256 + logit_pixel = self.logit_pixel(fuse_pixel) # 1, 256, 256 + + # classification process + fuse_image = self.fuse_image(e) # 64 + logit_image = self.logit_image(fuse_image) # 1 + + # combine segmentation and classification + fuse = torch.cat( + [ + fuse_pixel, + F.upsample( + fuse_image.view(batch_size, -1, 1, 1), scale_factor=256, mode="bilinear", align_corners=True, + ), + ], + 1, + ) # 128, 256, 256 + logit = self.logit(fuse) # 1, 256, 256 + + return logit, logit_pixel, logit_image.view(-1) + + +# stage3 model +class Res34Unetv5(nn.Module): + def __init__(self): + super(Res34Unetv5, self).__init__() + self.resnet = torchvision.models.resnet34(True) + + self.conv1 = nn.Sequential( + nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False), self.resnet.bn1, self.resnet.relu, + ) + + self.encode2 = nn.Sequential(self.resnet.layer1, SCse(64)) + self.encode3 = nn.Sequential(self.resnet.layer2, SCse(128)) + self.encode4 = nn.Sequential(self.resnet.layer3, SCse(256)) + self.encode5 = nn.Sequential(self.resnet.layer4, SCse(512)) + + self.center = nn.Sequential(FPAv2(512, 256), nn.MaxPool2d(2, 2)) + + self.decode5 = Decoderv2(256, 512, 64) + self.decode4 = Decoderv2(64, 256, 64) + self.decode3 = Decoderv2(64, 128, 64) + self.decode2 = Decoderv2(64, 64, 64) + + self.logit = nn.Sequential( + nn.Conv2d(256, 32, kernel_size=3, padding=1), nn.ELU(True), nn.Conv2d(32, 1, kernel_size=1, bias=False), + ) + + def forward(self, x): + # x: batch_size, 3, 128, 128 + x = self.conv1(x) # 64, 128, 128 + e2 = self.encode2(x) # 64, 128, 128 + e3 = self.encode3(e2) # 128, 64, 64 + e4 = self.encode4(e3) # 256, 32, 32 + e5 = self.encode5(e4) # 512, 16, 16 + + f = self.center(e5) # 256, 8, 8 + + d5 = self.decode5(f, e5) # 64, 16, 16 + d4 = self.decode4(d5, e4) # 64, 32, 32 + d3 = self.decode3(d4, e3) # 64, 64, 64 + d2 = self.decode2(d3, e2) # 64, 128, 128 + + f = torch.cat( + ( + d2, + F.upsample(d3, scale_factor=2, mode="bilinear", align_corners=True), + F.upsample(d4, scale_factor=4, mode="bilinear", align_corners=True), + F.upsample(d5, scale_factor=8, mode="bilinear", align_corners=True), + ), + 1, + ) # 256, 128, 128 + + f = F.dropout2d(f, p=0.4) + logit = self.logit(f) # 1, 128, 128 + + return logit + + +def get_seg_model(cfg, **kwargs): + assert ( + cfg.MODEL.IN_CHANNELS == 3 + ), f"SEResnet Unet deconvnet is not implemented to accept {cfg.MODEL.IN_CHANNELS} channels. 
Please only pass 3 for cfg.MODEL.IN_CHANNELS" + model = Res34Unetv4(n_classes=cfg.DATASET.NUM_CLASSES) + return model diff --git a/cv_lib/cv_lib/segmentation/models/section_deconvnet.py b/cv_lib/cv_lib/segmentation/models/section_deconvnet.py new file mode 100644 index 00000000..7234b1ee --- /dev/null +++ b/cv_lib/cv_lib/segmentation/models/section_deconvnet.py @@ -0,0 +1,307 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import torch.nn as nn + + +class section_deconvnet(nn.Module): + def __init__(self, n_classes=4, learned_billinear=False): + super(section_deconvnet, self).__init__() + self.learned_billinear = learned_billinear + self.n_classes = n_classes + self.unpool = nn.MaxUnpool2d(2, stride=2) + self.conv_block1 = nn.Sequential( + # conv1_1 + nn.Conv2d(1, 64, 3, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv1_2 + nn.Conv2d(64, 64, 3, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # pool1 + nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True), + ) + # it returns outputs and pool_indices_1 + + # 48*48 + + self.conv_block2 = nn.Sequential( + # conv2_1 + nn.Conv2d(64, 128, 3, padding=1), + nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv2_2 + nn.Conv2d(128, 128, 3, padding=1), + nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # pool2 + nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True), + ) + # it returns outputs and pool_indices_2 + + # 24*24 + + self.conv_block3 = nn.Sequential( + # conv3_1 + nn.Conv2d(128, 256, 3, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv3_2 + nn.Conv2d(256, 256, 3, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv3_3 + nn.Conv2d(256, 256, 3, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # pool3 + nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True), + ) + # it returns outputs and pool_indices_3 + + # 12*12 + + self.conv_block4 = nn.Sequential( + # conv4_1 + nn.Conv2d(256, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv4_2 + nn.Conv2d(512, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv4_3 + nn.Conv2d(512, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # pool4 + nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True), + ) + # it returns outputs and pool_indices_4 + + # 6*6 + + self.conv_block5 = nn.Sequential( + # conv5_1 + nn.Conv2d(512, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv5_2 + nn.Conv2d(512, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv5_3 + nn.Conv2d(512, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # pool5 + nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True), + ) + # it returns outputs and pool_indices_5 + + # 3*3 + + self.conv_block6 = nn.Sequential( + # fc6 + nn.Conv2d(512, 4096, 3), + # set the filter size and nor padding to make output into 1*1 + nn.BatchNorm2d(4096, eps=1e-05, momentum=0.1, affine=True), + 
nn.ReLU(inplace=True), + ) + + # 1*1 + + self.conv_block7 = nn.Sequential( + # fc7 + nn.Conv2d(4096, 4096, 1), + # set the filter size to make output into 1*1 + nn.BatchNorm2d(4096, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.deconv_block8 = nn.Sequential( + # fc6-deconv + nn.ConvTranspose2d(4096, 512, 3, stride=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + # 3*3 + + self.unpool_block9 = nn.Sequential( + # unpool5 + nn.MaxUnpool2d(2, stride=2), + ) + # usage unpool(output, indices) + + # 6*6 + + self.deconv_block10 = nn.Sequential( + # deconv5_1 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv5_2 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv5_3 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block11 = nn.Sequential( + # unpool4 + nn.MaxUnpool2d(2, stride=2), + ) + + # 12*12 + + self.deconv_block12 = nn.Sequential( + # deconv4_1 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv4_2 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv4_3 + nn.ConvTranspose2d(512, 256, 3, stride=1, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block13 = nn.Sequential( + # unpool3 + nn.MaxUnpool2d(2, stride=2), + ) + + # 24*24 + + self.deconv_block14 = nn.Sequential( + # deconv3_1 + nn.ConvTranspose2d(256, 256, 3, stride=1, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv3_2 + nn.ConvTranspose2d(256, 256, 3, stride=1, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv3_3 + nn.ConvTranspose2d(256, 128, 3, stride=1, padding=1), + nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block15 = nn.Sequential( + # unpool2 + nn.MaxUnpool2d(2, stride=2), + ) + + # 48*48 + + self.deconv_block16 = nn.Sequential( + # deconv2_1 + nn.ConvTranspose2d(128, 128, 3, stride=1, padding=1), + nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv2_2 + nn.ConvTranspose2d(128, 64, 3, stride=1, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block17 = nn.Sequential( + # unpool1 + nn.MaxUnpool2d(2, stride=2), + ) + + # 96*96 + + self.deconv_block18 = nn.Sequential( + # deconv1_1 + nn.ConvTranspose2d(64, 64, 3, stride=1, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv1_2 + nn.ConvTranspose2d(64, 64, 3, stride=1, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.seg_score19 = nn.Sequential( + # seg-score + nn.Conv2d(64, self.n_classes, 1), + ) + + if self.learned_billinear: + raise NotImplementedError + + def forward(self, x): + size0 = x.size() + conv1, indices1 = self.conv_block1(x) + size1 = conv1.size() + conv2, indices2 = self.conv_block2(conv1) + size2 = conv2.size() + 
conv3, indices3 = self.conv_block3(conv2) + size3 = conv3.size() + conv4, indices4 = self.conv_block4(conv3) + size4 = conv4.size() + conv5, indices5 = self.conv_block5(conv4) + + conv6 = self.conv_block6(conv5) + conv7 = self.conv_block7(conv6) + conv8 = self.deconv_block8(conv7) + conv9 = self.unpool(conv8, indices5, output_size=size4) + conv10 = self.deconv_block10(conv9) + conv11 = self.unpool(conv10, indices4, output_size=size3) + conv12 = self.deconv_block12(conv11) + conv13 = self.unpool(conv12, indices3, output_size=size2) + conv14 = self.deconv_block14(conv13) + conv15 = self.unpool(conv14, indices2, output_size=size1) + conv16 = self.deconv_block16(conv15) + conv17 = self.unpool(conv16, indices1, output_size=size0) + conv18 = self.deconv_block18(conv17) + out = self.seg_score19(conv18) + + return out + + def init_vgg16_params(self, vgg16, copy_fc8=True): + blocks = [ + self.conv_block1, + self.conv_block2, + self.conv_block3, + self.conv_block4, + self.conv_block5, + ] + + ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]] + features = list(vgg16.features.children()) + i_layer = 0 + # copy convolutional filters from vgg16 + for idx, conv_block in enumerate(blocks): + for l1, l2 in zip(features[ranges[idx][0] : ranges[idx][1]], conv_block): + if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d): + if i_layer == 0: + l2.weight.data = ( + (l1.weight.data[:, 0, :, :] + l1.weight.data[:, 1, :, :] + l1.weight.data[:, 2, :, :]) / 3.0 + ).view(l2.weight.size()) + l2.bias.data = l1.bias.data + i_layer = i_layer + 1 + else: + assert l1.weight.size() == l2.weight.size() + assert l1.bias.size() == l2.bias.size() + l2.weight.data = l1.weight.data + l2.bias.data = l1.bias.data + i_layer = i_layer + 1 + + +def get_seg_model(cfg, **kwargs): + assert ( + cfg.MODEL.IN_CHANNELS == 1 + ), f"Section deconvnet is not implemented to accept {cfg.MODEL.IN_CHANNELS} channels. Please only pass 1 for cfg.MODEL.IN_CHANNELS" + model = section_deconvnet(n_classes=cfg.DATASET.NUM_CLASSES) + return model diff --git a/cv_lib/cv_lib/segmentation/models/section_deconvnet_skip.py b/cv_lib/cv_lib/segmentation/models/section_deconvnet_skip.py new file mode 100644 index 00000000..cb8b2ecb --- /dev/null +++ b/cv_lib/cv_lib/segmentation/models/section_deconvnet_skip.py @@ -0,0 +1,307 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
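Each model module exposes the same `get_seg_model(cfg, **kwargs)` entry point, and for the deconvnets it reads exactly two config fields. A sketch with a stand-in namespace object (the project presumably passes its experiment config here; `SimpleNamespace` is only for illustration):

    from types import SimpleNamespace
    from cv_lib.segmentation.models.section_deconvnet import get_seg_model

    cfg = SimpleNamespace(
        MODEL=SimpleNamespace(IN_CHANNELS=1),     # the assert enforces single-channel input
        DATASET=SimpleNamespace(NUM_CLASSES=6),
    )
    model = get_seg_model(cfg)                    # returns section_deconvnet(n_classes=6)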
+ +import torch.nn as nn + + +class section_deconvnet_skip(nn.Module): + def __init__(self, n_classes=4, learned_billinear=False): + super(section_deconvnet_skip, self).__init__() + self.learned_billinear = learned_billinear + self.n_classes = n_classes + self.unpool = nn.MaxUnpool2d(2, stride=2) + self.conv_block1 = nn.Sequential( + # conv1_1 + nn.Conv2d(1, 64, 3, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv1_2 + nn.Conv2d(64, 64, 3, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # pool1 + nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True), + ) + # it returns outputs and pool_indices_1 + + # 48*48 + + self.conv_block2 = nn.Sequential( + # conv2_1 + nn.Conv2d(64, 128, 3, padding=1), + nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv2_2 + nn.Conv2d(128, 128, 3, padding=1), + nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # pool2 + nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True), + ) + # it returns outputs and pool_indices_2 + + # 24*24 + + self.conv_block3 = nn.Sequential( + # conv3_1 + nn.Conv2d(128, 256, 3, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv3_2 + nn.Conv2d(256, 256, 3, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv3_3 + nn.Conv2d(256, 256, 3, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # pool3 + nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True), + ) + # it returns outputs and pool_indices_3 + + # 12*12 + + self.conv_block4 = nn.Sequential( + # conv4_1 + nn.Conv2d(256, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv4_2 + nn.Conv2d(512, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv4_3 + nn.Conv2d(512, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # pool4 + nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True), + ) + # it returns outputs and pool_indices_4 + + # 6*6 + + self.conv_block5 = nn.Sequential( + # conv5_1 + nn.Conv2d(512, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv5_2 + nn.Conv2d(512, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # conv5_3 + nn.Conv2d(512, 512, 3, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # pool5 + nn.MaxPool2d(2, stride=2, return_indices=True, ceil_mode=True), + ) + # it returns outputs and pool_indices_5 + + # 3*3 + + self.conv_block6 = nn.Sequential( + # fc6 + nn.Conv2d(512, 4096, 3), + # set the filter size and nor padding to make output into 1*1 + nn.BatchNorm2d(4096, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + # 1*1 + + self.conv_block7 = nn.Sequential( + # fc7 + nn.Conv2d(4096, 4096, 1), + # set the filter size to make output into 1*1 + nn.BatchNorm2d(4096, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.deconv_block8 = nn.Sequential( + # fc6-deconv + nn.ConvTranspose2d(4096, 512, 3, stride=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + # 3*3 + + 
self.unpool_block9 = nn.Sequential( + # unpool5 + nn.MaxUnpool2d(2, stride=2), + ) + # usage unpool(output, indices) + + # 6*6 + + self.deconv_block10 = nn.Sequential( + # deconv5_1 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv5_2 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv5_3 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block11 = nn.Sequential( + # unpool4 + nn.MaxUnpool2d(2, stride=2), + ) + + # 12*12 + + self.deconv_block12 = nn.Sequential( + # deconv4_1 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv4_2 + nn.ConvTranspose2d(512, 512, 3, stride=1, padding=1), + nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv4_3 + nn.ConvTranspose2d(512, 256, 3, stride=1, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block13 = nn.Sequential( + # unpool3 + nn.MaxUnpool2d(2, stride=2), + ) + + # 24*24 + + self.deconv_block14 = nn.Sequential( + # deconv3_1 + nn.ConvTranspose2d(256, 256, 3, stride=1, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv3_2 + nn.ConvTranspose2d(256, 256, 3, stride=1, padding=1), + nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv3_3 + nn.ConvTranspose2d(256, 128, 3, stride=1, padding=1), + nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block15 = nn.Sequential( + # unpool2 + nn.MaxUnpool2d(2, stride=2), + ) + + # 48*48 + + self.deconv_block16 = nn.Sequential( + # deconv2_1 + nn.ConvTranspose2d(128, 128, 3, stride=1, padding=1), + nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv2_2 + nn.ConvTranspose2d(128, 64, 3, stride=1, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.unpool_block17 = nn.Sequential( + # unpool1 + nn.MaxUnpool2d(2, stride=2), + ) + + # 96*96 + + self.deconv_block18 = nn.Sequential( + # deconv1_1 + nn.ConvTranspose2d(64, 64, 3, stride=1, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + # deconv1_2 + nn.ConvTranspose2d(64, 64, 3, stride=1, padding=1), + nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True), + nn.ReLU(inplace=True), + ) + + self.seg_score19 = nn.Sequential( + # seg-score + nn.Conv2d(64, self.n_classes, 1), + ) + + if self.learned_billinear: + raise NotImplementedError + + def forward(self, x): + size0 = x.size() + conv1, indices1 = self.conv_block1(x) + size1 = conv1.size() + conv2, indices2 = self.conv_block2(conv1) + size2 = conv2.size() + conv3, indices3 = self.conv_block3(conv2) + size3 = conv3.size() + conv4, indices4 = self.conv_block4(conv3) + size4 = conv4.size() + conv5, indices5 = self.conv_block5(conv4) + + conv6 = self.conv_block6(conv5) + conv7 = self.conv_block7(conv6) + conv8 = self.deconv_block8(conv7) + conv5 + conv9 = self.unpool(conv8, indices5, output_size=size4) + conv10 = self.deconv_block10(conv9) + conv4 + conv11 = self.unpool(conv10, indices4, output_size=size3) 
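+        # as in patch_deconvnet_skip, the remaining stages add the encoder
+        # outputs conv3, conv2 and conv1 back in before each unpooling step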
+ conv12 = self.deconv_block12(conv11) + conv3 + conv13 = self.unpool(conv12, indices3, output_size=size2) + conv14 = self.deconv_block14(conv13) + conv2 + conv15 = self.unpool(conv14, indices2, output_size=size1) + conv16 = self.deconv_block16(conv15) + conv1 + conv17 = self.unpool(conv16, indices1, output_size=size0) + conv18 = self.deconv_block18(conv17) + out = self.seg_score19(conv18) + + return out + + def init_vgg16_params(self, vgg16, copy_fc8=True): + blocks = [ + self.conv_block1, + self.conv_block2, + self.conv_block3, + self.conv_block4, + self.conv_block5, + ] + + ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]] + features = list(vgg16.features.children()) + i_layer = 0 + # copy convolutional filters from vgg16 + for idx, conv_block in enumerate(blocks): + for l1, l2 in zip(features[ranges[idx][0] : ranges[idx][1]], conv_block): + if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d): + if i_layer == 0: + l2.weight.data = ( + (l1.weight.data[:, 0, :, :] + l1.weight.data[:, 1, :, :] + l1.weight.data[:, 2, :, :]) / 3.0 + ).view(l2.weight.size()) + l2.bias.data = l1.bias.data + i_layer = i_layer + 1 + else: + assert l1.weight.size() == l2.weight.size() + assert l1.bias.size() == l2.bias.size() + l2.weight.data = l1.weight.data + l2.bias.data = l1.bias.data + i_layer = i_layer + 1 + + +def get_seg_model(cfg, **kwargs): + assert ( + cfg.MODEL.IN_CHANNELS == 1 + ), f"Section deconvnet is not implemented to accept {cfg.MODEL.IN_CHANNELS} channels. Please only pass 1 for cfg.MODEL.IN_CHANNELS" + model = section_deconvnet_skip(n_classes=cfg.DATASET.NUM_CLASSES) + return model diff --git a/cv_lib/cv_lib/segmentation/models/seg_hrnet.py b/cv_lib/cv_lib/segmentation/models/seg_hrnet.py new file mode 100644 index 00000000..dd06118e --- /dev/null +++ b/cv_lib/cv_lib/segmentation/models/seg_hrnet.py @@ -0,0 +1,446 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
+# Written by Ke Sun (sunk@mail.ustc.edu.cn) +# ------------------------------------------------------------------------------ +"""HRNET for segmentation taken from https://github.com/HRNet/HRNet-Semantic-Segmentation +pytorch-v1.1 branch +hash: 06142dc1c7026e256a7561c3e875b06622b5670f + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging +import os + +import numpy as np +import torch +import torch._utils +import torch.nn as nn +import torch.nn.functional as F + +BatchNorm2d = nn.BatchNorm2d +BN_MOMENTUM = 0.1 +logger = logging.getLogger(__name__) + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM) + self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False) + self.bn3 = BatchNorm2d(planes * self.expansion, momentum=BN_MOMENTUM) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class HighResolutionModule(nn.Module): + def __init__( + self, num_branches, blocks, num_blocks, num_inchannels, num_channels, fuse_method, multi_scale_output=True, + ): + super(HighResolutionModule, self).__init__() + self._check_branches(num_branches, blocks, num_blocks, num_inchannels, num_channels) + + self.num_inchannels = num_inchannels + self.fuse_method = fuse_method + self.num_branches = num_branches + + self.multi_scale_output = multi_scale_output + + self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels) + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU(inplace=True) + + def _check_branches(self, num_branches, blocks, num_blocks, num_inchannels, num_channels): + if num_branches != len(num_blocks): + error_msg = "NUM_BRANCHES({}) <> NUM_BLOCKS({})".format(num_branches, len(num_blocks)) + logger.error(error_msg) + raise ValueError(error_msg) + + if num_branches != len(num_channels): + error_msg = 
"NUM_BRANCHES({}) <> NUM_CHANNELS({})".format(num_branches, len(num_channels)) + logger.error(error_msg) + raise ValueError(error_msg) + + if num_branches != len(num_inchannels): + error_msg = "NUM_BRANCHES({}) <> NUM_INCHANNELS({})".format(num_branches, len(num_inchannels)) + logger.error(error_msg) + raise ValueError(error_msg) + + def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1): + downsample = None + if stride != 1 or self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.num_inchannels[branch_index], + num_channels[branch_index] * block.expansion, + kernel_size=1, + stride=stride, + bias=False, + ), + BatchNorm2d(num_channels[branch_index] * block.expansion, momentum=BN_MOMENTUM), + ) + + layers = [] + layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index], stride, downsample,)) + self.num_inchannels[branch_index] = num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index])) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + branches = [] + + for i in range(num_branches): + branches.append(self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + if self.num_branches == 1: + return None + + num_branches = self.num_branches + num_inchannels = self.num_inchannels + fuse_layers = [] + for i in range(num_branches if self.multi_scale_output else 1): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append( + nn.Sequential( + nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, 1, 0, bias=False,), + BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM), + ) + ) + elif j == i: + fuse_layer.append(None) + else: + conv3x3s = [] + for k in range(i - j): + if k == i - j - 1: + num_outchannels_conv3x3 = num_inchannels[i] + conv3x3s.append( + nn.Sequential( + nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False,), + BatchNorm2d(num_outchannels_conv3x3, momentum=BN_MOMENTUM), + ) + ) + else: + num_outchannels_conv3x3 = num_inchannels[j] + conv3x3s.append( + nn.Sequential( + nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False,), + BatchNorm2d(num_outchannels_conv3x3, momentum=BN_MOMENTUM), + nn.ReLU(inplace=True), + ) + ) + fuse_layer.append(nn.Sequential(*conv3x3s)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def get_num_inchannels(self): + return self.num_inchannels + + def forward(self, x): + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = [] + for i in range(len(self.fuse_layers)): + y = x[0] if i == 0 else self.fuse_layers[i][0](x[0]) + for j in range(1, self.num_branches): + if i == j: + y = y + x[j] + elif j > i: + width_output = x[i].shape[-1] + height_output = x[i].shape[-2] + y = y + F.interpolate( + self.fuse_layers[i][j](x[j]), size=[height_output, width_output], mode="bilinear", + ) + else: + y = y + self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + + return x_fuse + + +blocks_dict = {"BASIC": BasicBlock, "BOTTLENECK": Bottleneck} + + +class HighResolutionNet(nn.Module): + def __init__(self, config, **kwargs): + extra = config.MODEL.EXTRA + super(HighResolutionNet, self).__init__() + + # 
stem net + self.conv1 = nn.Conv2d(config.MODEL.IN_CHANNELS, 64, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = BatchNorm2d(64, momentum=BN_MOMENTUM) + self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False) + self.bn2 = BatchNorm2d(64, momentum=BN_MOMENTUM) + self.relu = nn.ReLU(inplace=True) + + self.layer1 = self._make_layer(Bottleneck, 64, 64, 4) + + self.stage2_cfg = extra["STAGE2"] + num_channels = self.stage2_cfg["NUM_CHANNELS"] + block = blocks_dict[self.stage2_cfg["BLOCK"]] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition1 = self._make_transition_layer([256], num_channels) + self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels) + + self.stage3_cfg = extra["STAGE3"] + num_channels = self.stage3_cfg["NUM_CHANNELS"] + block = blocks_dict[self.stage3_cfg["BLOCK"]] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels) + + self.stage4_cfg = extra["STAGE4"] + num_channels = self.stage4_cfg["NUM_CHANNELS"] + block = blocks_dict[self.stage4_cfg["BLOCK"]] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True) + + last_inp_channels = np.int(np.sum(pre_stage_channels)) + + self.last_layer = nn.Sequential( + nn.Conv2d( + in_channels=last_inp_channels, out_channels=last_inp_channels, kernel_size=1, stride=1, padding=0, + ), + BatchNorm2d(last_inp_channels, momentum=BN_MOMENTUM), + nn.ReLU(inplace=True), + nn.Conv2d( + in_channels=last_inp_channels, + out_channels=config.DATASET.NUM_CLASSES, + kernel_size=extra.FINAL_CONV_KERNEL, + stride=1, + padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0, + ), + ) + + def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append( + nn.Sequential( + nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False,), + BatchNorm2d(num_channels_cur_layer[i], momentum=BN_MOMENTUM), + nn.ReLU(inplace=True), + ) + ) + else: + transition_layers.append(None) + else: + conv3x3s = [] + for j in range(i + 1 - num_branches_pre): + inchannels = num_channels_pre_layer[-1] + outchannels = num_channels_cur_layer[i] if j == i - num_branches_pre else inchannels + conv3x3s.append( + nn.Sequential( + nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False), + BatchNorm2d(outchannels, momentum=BN_MOMENTUM), + nn.ReLU(inplace=True), + ) + ) + transition_layers.append(nn.Sequential(*conv3x3s)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, inplanes, planes, blocks, stride=1): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False,), + BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM), + ) + + layers = [] + layers.append(block(inplanes, planes, stride, 
downsample)) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(inplanes, planes)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True): + num_modules = layer_config["NUM_MODULES"] + num_branches = layer_config["NUM_BRANCHES"] + num_blocks = layer_config["NUM_BLOCKS"] + num_channels = layer_config["NUM_CHANNELS"] + block = blocks_dict[layer_config["BLOCK"]] + fuse_method = layer_config["FUSE_METHOD"] + + modules = [] + for i in range(num_modules): + # multi_scale_output is only used last module + if not multi_scale_output and i == num_modules - 1: + reset_multi_scale_output = False + else: + reset_multi_scale_output = True + modules.append( + HighResolutionModule( + num_branches, + block, + num_blocks, + num_inchannels, + num_channels, + fuse_method, + reset_multi_scale_output, + ) + ) + num_inchannels = modules[-1].get_num_inchannels() + + return nn.Sequential(*modules), num_inchannels + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + x = self.layer1(x) + + x_list = [] + for i in range(self.stage2_cfg["NUM_BRANCHES"]): + if self.transition1[i] is not None: + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.stage3_cfg["NUM_BRANCHES"]): + if self.transition2[i] is not None: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.stage4_cfg["NUM_BRANCHES"]): + if self.transition3[i] is not None: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + x = self.stage4(x_list) + + # Upsampling + x0_h, x0_w = x[0].size(2), x[0].size(3) + x1 = F.upsample(x[1], size=(x0_h, x0_w), mode="bilinear") + x2 = F.upsample(x[2], size=(x0_h, x0_w), mode="bilinear") + x3 = F.upsample(x[3], size=(x0_h, x0_w), mode="bilinear") + + x = torch.cat([x[0], x1, x2, x3], 1) + + x = self.last_layer(x) + + return x + + def init_weights( + self, pretrained="", + ): + logger.info("=> init weights from normal distribution") + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.normal_(m.weight, std=0.001) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + if os.path.isfile(pretrained): + pretrained_dict = torch.load(pretrained) + logger.info("=> loading pretrained model {}".format(pretrained)) + model_dict = self.state_dict() + pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict.keys()} + # for k, _ in pretrained_dict.items(): + # logger.info( + # '=> loading {} pretrained model {}'.format(k, pretrained)) + model_dict.update(pretrained_dict) + self.load_state_dict(model_dict) + + +def get_seg_model(cfg, **kwargs): + model = HighResolutionNet(cfg, **kwargs) + model.init_weights(cfg.MODEL.PRETRAINED) + + return model diff --git a/cv_lib/cv_lib/segmentation/models/unet.py b/cv_lib/cv_lib/segmentation/models/unet.py new file mode 100644 index 00000000..c6ae6813 --- /dev/null +++ b/cv_lib/cv_lib/segmentation/models/unet.py @@ -0,0 +1,116 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
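+
+# Usage sketch for the UNet defined below (illustrative only; the channel count,
+# class count and input size are assumptions picked for the example, not values
+# mandated by this module):
+#
+#   import torch
+#   net = UNet(n_channels=1, n_classes=6)
+#   x = torch.randn(2, 1, 128, 128)   # (batch, channels, height, width)
+#   logits = net(x)                   # -> torch.Size([2, 6, 128, 128])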
+ +""" Taken from https://github.com/milesial/Pytorch-UNet + +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class double_conv(nn.Module): + """(conv => BN => ReLU) * 2""" + + def __init__(self, in_ch, out_ch): + super(double_conv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d(in_ch, out_ch, 3, padding=1), + nn.BatchNorm2d(out_ch), + nn.ReLU(inplace=True), + nn.Conv2d(out_ch, out_ch, 3, padding=1), + nn.BatchNorm2d(out_ch), + nn.ReLU(inplace=True), + ) + + def forward(self, x): + x = self.conv(x) + return x + + +class inconv(nn.Module): + def __init__(self, in_ch, out_ch): + super(inconv, self).__init__() + self.conv = double_conv(in_ch, out_ch) + + def forward(self, x): + x = self.conv(x) + return x + + +class down(nn.Module): + def __init__(self, in_ch, out_ch): + super(down, self).__init__() + self.mpconv = nn.Sequential(nn.MaxPool2d(2), double_conv(in_ch, out_ch)) + + def forward(self, x): + x = self.mpconv(x) + return x + + +class up(nn.Module): + def __init__(self, in_ch, out_ch, bilinear=True): + super(up, self).__init__() + + if bilinear: + self.up = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True) + else: + self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2) + + self.conv = double_conv(in_ch, out_ch) + + def forward(self, x1, x2): + x1 = self.up(x1) + + # input is CHW + diffY = x2.size()[2] - x1.size()[2] + diffX = x2.size()[3] - x1.size()[3] + + x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2)) + + x = torch.cat([x2, x1], dim=1) + x = self.conv(x) + return x + + +class outconv(nn.Module): + def __init__(self, in_ch, out_ch): + super(outconv, self).__init__() + self.conv = nn.Conv2d(in_ch, out_ch, 1) + + def forward(self, x): + x = self.conv(x) + return x + + +class UNet(nn.Module): + def __init__(self, n_channels, n_classes): + super(UNet, self).__init__() + self.inc = inconv(n_channels, 64) + self.down1 = down(64, 128) + self.down2 = down(128, 256) + self.down3 = down(256, 512) + self.down4 = down(512, 512) + self.up1 = up(1024, 256) + self.up2 = up(512, 128) + self.up3 = up(256, 64) + self.up4 = up(128, 64) + self.outc = outconv(64, n_classes) + + def forward(self, x): + x1 = self.inc(x) + x2 = self.down1(x1) + x3 = self.down2(x2) + x4 = self.down3(x3) + x5 = self.down4(x4) + x = self.up1(x5, x4) + x = self.up2(x, x3) + x = self.up3(x, x2) + x = self.up4(x, x1) + x = self.outc(x) + return x + + +def get_seg_model(cfg, **kwargs): + model = UNet(cfg.MODEL.IN_CHANNELS, cfg.DATASET.NUM_CLASSES) + return model diff --git a/cv_lib/cv_lib/segmentation/models/utils.py b/cv_lib/cv_lib/segmentation/models/utils.py new file mode 100644 index 00000000..70b4805f --- /dev/null +++ b/cv_lib/cv_lib/segmentation/models/utils.py @@ -0,0 +1,103 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +import torch.nn as nn + + +class conv2DBatchNorm(nn.Module): + def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True, dilation=1): + super(conv2DBatchNorm, self).__init__() + + if dilation > 1: + conv_mod = nn.Conv2d( + int(in_channels), + int(n_filters), + kernel_size=k_size, + padding=padding, + stride=stride, + bias=bias, + dilation=dilation, + ) + + else: + conv_mod = nn.Conv2d( + int(in_channels), + int(n_filters), + kernel_size=k_size, + padding=padding, + stride=stride, + bias=bias, + dilation=1, + ) + + self.cb_unit = nn.Sequential(conv_mod, nn.BatchNorm2d(int(n_filters)),) + + def forward(self, inputs): + outputs = self.cb_unit(inputs) + return outputs + + +class deconv2DBatchNorm(nn.Module): + def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True): + super(deconv2DBatchNorm, self).__init__() + + self.dcb_unit = nn.Sequential( + nn.ConvTranspose2d( + int(in_channels), int(n_filters), kernel_size=k_size, padding=padding, stride=stride, bias=bias, + ), + nn.BatchNorm2d(int(n_filters)), + ) + + def forward(self, inputs): + outputs = self.dcb_unit(inputs) + return outputs + + +class conv2DBatchNormRelu(nn.Module): + def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True, dilation=1): + super(conv2DBatchNormRelu, self).__init__() + + if dilation > 1: + conv_mod = nn.Conv2d( + int(in_channels), + int(n_filters), + kernel_size=k_size, + padding=padding, + stride=stride, + bias=bias, + dilation=dilation, + ) + + else: + conv_mod = nn.Conv2d( + int(in_channels), + int(n_filters), + kernel_size=k_size, + padding=padding, + stride=stride, + bias=bias, + dilation=1, + ) + + self.cbr_unit = nn.Sequential(conv_mod, nn.BatchNorm2d(int(n_filters)), nn.ReLU(inplace=True),) + + def forward(self, inputs): + outputs = self.cbr_unit(inputs) + return outputs + + +class deconv2DBatchNormRelu(nn.Module): + def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True): + super(deconv2DBatchNormRelu, self).__init__() + + self.dcbr_unit = nn.Sequential( + nn.ConvTranspose2d( + int(in_channels), int(n_filters), kernel_size=k_size, padding=padding, stride=stride, bias=bias, + ), + nn.BatchNorm2d(int(n_filters)), + nn.ReLU(inplace=True), + ) + + def forward(self, inputs): + outputs = self.dcbr_unit(inputs) + return outputs diff --git a/cv_lib/cv_lib/segmentation/penobscot/engine.py b/cv_lib/cv_lib/segmentation/penobscot/engine.py new file mode 100644 index 00000000..0b1273bb --- /dev/null +++ b/cv_lib/cv_lib/segmentation/penobscot/engine.py @@ -0,0 +1,119 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import torch + +from ignite.engine.engine import Engine +from toolz import curry +from torch.nn import functional as F + + +def _upscale_model_output(y_pred, y): + ph, pw = y_pred.size(2), y_pred.size(3) + h, w = y.size(2), y.size(3) + if ph != h or pw != w: + y_pred = F.upsample(input=y_pred, size=(h, w), mode="bilinear") + return y_pred + + +def create_supervised_trainer( + model, + optimizer, + loss_fn, + prepare_batch, + device=None, + non_blocking=False, + output_transform=lambda x, y, y_pred, loss: {"loss": loss.item()}, +): + """Factory function for creating a trainer for supervised segmentation models. + + Args: + model (`torch.nn.Module`): the model to train. + optimizer (`torch.optim.Optimizer`): the optimizer to use. + loss_fn (torch.nn loss function): the loss function to use. 
+        prepare_batch (callable): function that receives `batch`, `device`, `non_blocking` and outputs
+            a tuple of tensors `(batch_x, batch_y, patch_id, patch_locations)`.
+        device (str, optional): device type specification (default: None).
+            Applies to both model and batches.
+        non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
+            with respect to the host. For other cases, this argument has no effect.
+        output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns the value
+            to be assigned to engine's state.output after each iteration. Default returns `{"loss": loss.item()}`.
+
+    Note: `engine.state.output` for this engine is defined by the `output_transform` parameter and is, by default,
+        a dictionary containing the loss of the processed batch.
+
+    Returns:
+        Engine: a trainer engine with supervised update function.
+    """
+    if device:
+        model.to(device)
+
+    def _update(engine, batch):
+        model.train()
+        optimizer.zero_grad()
+        x, y, ids, patch_locations = prepare_batch(batch, device=device, non_blocking=non_blocking)
+        y_pred = model(x)
+        # upscale the model output to the label size before computing the loss
+        y_pred = _upscale_model_output(y_pred, y)
+        loss = loss_fn(y_pred.squeeze(1), y.squeeze(1))
+        loss.backward()
+        optimizer.step()
+        return output_transform(x, y, y_pred, loss)
+
+    return Engine(_update)
+
+
+@curry
+def val_transform(x, y, y_pred, ids, patch_locations):
+    return {
+        "image": x,
+        "y_pred": y_pred.detach(),
+        "mask": y.detach(),
+        "ids": ids,
+        "patch_locations": patch_locations,
+    }
+
+
+def create_supervised_evaluator(
+    model, prepare_batch, metrics=None, device=None, non_blocking=False, output_transform=val_transform,
+):
+    """Factory function for creating an evaluator for supervised segmentation models.
+
+    Args:
+        model (`torch.nn.Module`): the model to evaluate.
+        prepare_batch (callable): function that receives `batch`, `device`, `non_blocking` and outputs
+            a tuple of tensors `(batch_x, batch_y, patch_id, patch_locations)`.
+        metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.
+        device (str, optional): device type specification (default: None).
+            Applies to both model and batches.
+        non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
+            with respect to the host. For other cases, this argument has no effect.
+        output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'ids', 'patch_locations'
+            and returns the value to be assigned to engine's state.output after each iteration. Default is
+            `val_transform`, which returns a dictionary of the image, prediction, mask, ids and patch locations.
+            If you change it, also set `output_transform` in the attached metrics accordingly.
+
+    Note: `engine.state.output` for this engine is defined by the `output_transform` parameter and is, by default,
+        the dictionary returned by `val_transform`.
+
+    Returns:
+        Engine: an evaluator engine with supervised inference function.
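+
+    Example (an illustrative sketch; `model`, `prepare_batch` and `val_loader` are
+        assumed to exist, and any attached metric is assumed to unpack the dictionary
+        produced by `val_transform` via its own `output_transform`):
+
+        evaluator = create_supervised_evaluator(model, prepare_batch, device="cuda")
+        state = evaluator.run(val_loader)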
+ """ + metrics = metrics or {} + + if device: + model.to(device) + + def _inference(engine, batch): + model.eval() + with torch.no_grad(): + x, y, ids, patch_locations = prepare_batch(batch, device=device, non_blocking=non_blocking) + y_pred = model(x) + y_pred = _upscale_model_output(y_pred, x) + return output_transform(x, y, y_pred, ids, patch_locations) + + engine = Engine(_inference) + + for name, metric in metrics.items(): + metric.attach(engine, name) + + return engine diff --git a/cv_lib/cv_lib/segmentation/utils.py b/cv_lib/cv_lib/segmentation/utils.py new file mode 100644 index 00000000..07951e88 --- /dev/null +++ b/cv_lib/cv_lib/segmentation/utils.py @@ -0,0 +1,39 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import numpy as np +from deepseismic_interpretation.dutchf3.data import decode_segmap +from os import path +from PIL import Image +from toolz import pipe + + +def _chw_to_hwc(image_array_numpy): + return np.moveaxis(image_array_numpy, 0, -1) + + +def save_images(pred_dict, output_dir, num_classes, colours, extra_identifier=""): + for id in pred_dict: + save_image( + pred_dict[id].unsqueeze(0).cpu().numpy(), + output_dir, + num_classes, + colours, + extra_identifier=extra_identifier, + ) + + +def save_image(image_numpy_array, output_dir, num_classes, colours, extra_identifier=""): + """Save segmentation map as image + + Args: + image_numpy_array (numpy.Array): numpy array that represents an image + output_dir ([type]): + num_classes ([type]): [description] + colours ([type]): [description] + extra_identifier (str, optional): [description]. Defaults to "". + """ + im_array = decode_segmap(image_numpy_array, n_classes=num_classes, label_colours=colours,) + im = pipe((im_array * 255).astype(np.uint8).squeeze(), _chw_to_hwc, Image.fromarray,) + filename = path.join(output_dir, f"{id}_{extra_identifier}.png") + im.save(filename) diff --git a/cv_lib/cv_lib/utils.py b/cv_lib/cv_lib/utils.py new file mode 100644 index 00000000..d3e41aeb --- /dev/null +++ b/cv_lib/cv_lib/utils.py @@ -0,0 +1,19 @@ +import os +import logging + + +def load_log_configuration(log_config_file): + """ + Loads logging configuration from the given configuration file. + """ + if not os.path.exists(log_config_file) or not os.path.isfile(log_config_file): + msg = "%s configuration file does not exist!", log_config_file + logging.getLogger(__name__).error(msg) + raise ValueError(msg) + try: + logging.config.fileConfig(log_config_file, disable_existing_loggers=False) + logging.getLogger(__name__).info("%s configuration file was loaded.", log_config_file) + except Exception as e: + logging.getLogger(__name__).error("Failed to load configuration from %s!", log_config_file) + logging.getLogger(__name__).debug(str(e), exc_info=True) + raise e diff --git a/cv_lib/requirements.txt b/cv_lib/requirements.txt new file mode 100644 index 00000000..e543e89a --- /dev/null +++ b/cv_lib/requirements.txt @@ -0,0 +1,9 @@ +numpy>=1.16.4 +toolz>=0.9.0 +pandas>=0.24.2 +ignite>=1.1.0 +scikit_learn>=0.21.3 +tensorboardX>=1.8 +torch>=1.2.0 +torchvision>=0.4.0 +tqdm>=4.33.0 diff --git a/cv_lib/setup.py b/cv_lib/setup.py new file mode 100644 index 00000000..23353ae0 --- /dev/null +++ b/cv_lib/setup.py @@ -0,0 +1,54 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# /* spell-checker: disable */ +import os + +try: + from setuptools import setup, find_packages +except ImportError: + from distutils.core import setup, find_packages + + +# Package meta-data. 
+NAME = "cv_lib" +DESCRIPTION = "A library for computer vision" +URL = "" +EMAIL = "msalvaris@users.noreply.github.com" +AUTHOR = "AUTHORS.md" +LICENSE = "" +LONG_DESCRIPTION = DESCRIPTION + + +with open("requirements.txt") as f: + requirements = f.read().splitlines() + + +here = os.path.abspath(os.path.dirname(__file__)) + +# Load the package's __version__.py module as a dictionary. +about = {} +with open(os.path.join(here, NAME, "__version__.py")) as f: + exec(f.read(), about) + + +setup( + name=NAME, + version=about["__version__"], + url=URL, + license=LICENSE, + author=AUTHOR, + author_email=EMAIL, + description=DESCRIPTION, + long_description=LONG_DESCRIPTION, + scripts=[], + packages=find_packages(), + include_package_data=True, + install_requires=requirements, + classifiers=[ + "Development Status :: 1 - Alpha", + "Intended Audience :: Data Scientists & Developers", + "Operating System :: POSIX", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python :: 3.6", + ], +) diff --git a/cv_lib/tests/test_metrics.py b/cv_lib/tests/test_metrics.py new file mode 100644 index 00000000..23a671eb --- /dev/null +++ b/cv_lib/tests/test_metrics.py @@ -0,0 +1,126 @@ +import torch +import numpy as np +from pytest import approx + +from ignite.metrics import ConfusionMatrix, MetricsLambda + +from cv_lib.segmentation.metrics import class_accuracy, mean_class_accuracy + + +# source repo: +# https://github.com/pytorch/ignite/blob/master/tests/ignite/metrics/test_confusion_matrix.py +def _get_y_true_y_pred(): + # Generate an image with labels 0 (background), 1, 2 + # 3 classes: + y_true = np.zeros((30, 30), dtype=np.int) + y_true[1:11, 1:11] = 1 + y_true[15:25, 15:25] = 2 + + y_pred = np.zeros((30, 30), dtype=np.int) + y_pred[20:30, 1:11] = 1 + y_pred[20:30, 20:30] = 2 + return y_true, y_pred + + +# source repo: +# https://github.com/pytorch/ignite/blob/master/tests/ignite/metrics/test_confusion_matrix.py +def _compute_th_y_true_y_logits(y_true, y_pred): + # Create torch.tensor from numpy + th_y_true = torch.from_numpy(y_true).unsqueeze(0) + # Create logits torch.tensor: + num_classes = max(np.max(y_true), np.max(y_pred)) + 1 + y_probas = np.ones((num_classes,) + y_true.shape) * -10 + for i in range(num_classes): + y_probas[i, (y_pred == i)] = 720 + th_y_logits = torch.from_numpy(y_probas).unsqueeze(0) + return th_y_true, th_y_logits + + +# Dependency metrics do not get updated automatically, so need to retrieve and +# update confusion matrix manually +def _get_cm(metriclambda): + metrics = list(metriclambda.args) + while metrics: + metric = metrics[0] + if isinstance(metric, ConfusionMatrix): + return metric + elif isinstance(metric, MetricsLambda): + metrics.extend(metric.args) + del metrics[0] + + +def test_class_accuracy(): + y_true, y_pred = _get_y_true_y_pred() + + ## Perfect prediction + th_y_true, th_y_logits = _compute_th_y_true_y_logits(y_true, y_true) + # Update metric + output = (th_y_logits, th_y_true) + acc_metric = class_accuracy(num_classes=3) + acc_metric.update(output) + + # Retrieve and update confusion matrix + metric_cm = _get_cm(acc_metric) + # assert confusion matrix exists and is all zeroes + assert metric_cm is not None + assert torch.min(metric_cm.confusion_matrix) == 0.0 and torch.max(metric_cm.confusion_matrix) == 0.0 + metric_cm.update(output) + + # Expected result + true_res = [1.0, 1.0, 1.0] + res = acc_metric.compute().numpy() + assert np.all(res == true_res), "Result {} vs. 
expected values {}".format(res, true_res) + + ## Imperfect prediction + th_y_true, th_y_logits = _compute_th_y_true_y_logits(y_true, y_pred) + # Update metric + output = (th_y_logits, th_y_true) + acc_metric = class_accuracy(num_classes=3) + acc_metric.update(output) + + # Retrieve and update confusion matrix + metric_cm = _get_cm(acc_metric) + assert metric_cm is not None + assert torch.min(metric_cm.confusion_matrix) == 0.0 and torch.max(metric_cm.confusion_matrix) == 0.0 + metric_cm.update(output) + + # Expected result + true_res = [0.75, 0.0, 0.25] + res = acc_metric.compute().numpy() + assert np.all(res == true_res), "Result {} vs. expected values {}".format(res, true_res) + + +def test_mean_class_accuracy(): + y_true, y_pred = _get_y_true_y_pred() + + ## Perfect prediction + th_y_true, th_y_logits = _compute_th_y_true_y_logits(y_true, y_true) + # Update metric + output = (th_y_logits, th_y_true) + acc_metric = mean_class_accuracy(num_classes=3) + acc_metric.update(output) + + # Retrieve and update confusion matrix + metric_cm = _get_cm(acc_metric) + metric_cm.update(output) + + # Expected result + true_res = 1.0 + res = acc_metric.compute().numpy() + assert res == approx(true_res), "Result {} vs. expected value {}".format(res, true_res) + + ## Imperfect prediction + th_y_true, th_y_logits = _compute_th_y_true_y_logits(y_true, y_pred) + # Update metric + output = (th_y_logits, th_y_true) + acc_metric = mean_class_accuracy(num_classes=3) + acc_metric.update(output) + + # Retrieve and update confusion matrix + metric_cm = _get_cm(acc_metric) + metric_cm.update(output) + + # Expected result + true_res = 1 / 3 + res = acc_metric.compute().numpy() + assert res == approx(true_res), "Result {} vs. expected value {}".format(res, true_res) diff --git a/deepseismic/__init__.py b/deepseismic/__init__.py deleted file mode 100644 index 8dc07e06..00000000 --- a/deepseismic/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from . import cli, forward, velocity - -__all__ = ["cli", "forward", "velocity"] diff --git a/deepseismic/cli/__init__.py b/deepseismic/cli/__init__.py deleted file mode 100644 index 1b0db11d..00000000 --- a/deepseismic/cli/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -from functools import partial - -import click - -from . 
import forward, velocity - -click.option = partial(click.option, show_default=True) - - -@click.group() -@click.pass_context -def cli(ctx): - ctx.ensure_object(dict) - - -cli.add_command(forward.fwd) -cli.add_command(velocity.vp) - - -def main(): - cli(obj={}) diff --git a/deepseismic/cli/forward.py b/deepseismic/cli/forward.py deleted file mode 100644 index 0ef69d39..00000000 --- a/deepseismic/cli/forward.py +++ /dev/null @@ -1,123 +0,0 @@ -from functools import partial - -import click -import h5py -import numpy as np - -from ..forward import Receiver, RickerSource, TimeAxis, VelocityModel - -click.option = partial(click.option, show_default=True) - - -@click.group() -@click.argument("input", type=click.Path()) -@click.argument("output", type=click.Path()) -@click.option( - "-d", - "--duration", - default=1000.0, - type=float, - help="Simulation duration (in ms)", -) -@click.option("-dt", default=2.0, type=float, help="Time increment (in ms)") -@click.option( - "--n-pml", default=10, type=int, help="PML size (in grid points)" -) -@click.option( - "--n-receivers", - default=11, - type=int, - help="Number of receivers per horizontal dimension", -) -@click.option("--space-order", default=2, type=int, help="Space order") -@click.option( - "--spacing", default=10.0, type=float, help="Spacing between grid points" -) -@click.pass_context -def fwd( - ctx, - dt: float, - duration: float, - input: str, - n_pml: int, - n_receivers: int, - output: str, - space_order: int, - spacing: float, -): - """Forward modelling""" - if dt: - ctx.obj["dt"] = dt - ctx.obj["duration"] = duration - ctx.obj["input_file"] = h5py.File(input, mode="r") - ctx.obj["n_pml"] = n_pml - ctx.obj["n_receivers"] = n_receivers - ctx.obj["output_file"] = h5py.File(output, mode="w") - ctx.obj["space_order"] = space_order - ctx.obj["spacing"] = spacing - - -@fwd.command() -@click.option( - "-f0", default=0.01, type=float, help="Source peak frequency (in kHz)" -) -@click.pass_context -def ricker(ctx, f0: float): - """Ricker source""" - input_file = ctx.obj["input_file"] - output_file = ctx.obj["output_file"] - n = sum(len(x.values()) for x in input_file.values()) - with click.progressbar(length=n) as bar: - for input_group_name, input_group in input_file.items(): - for dataset in input_group.values(): - first_dataset = dataset - break - model = VelocityModel( - shape=first_dataset.shape, - origin=tuple(0.0 for _ in first_dataset.shape), - spacing=tuple(ctx.obj["spacing"] for _ in first_dataset.shape), - vp=first_dataset[()], - space_order=ctx.obj["space_order"], - n_pml=ctx.obj["n_pml"], - ) - time_range = TimeAxis( - start=0.0, stop=ctx.obj["duration"], step=ctx.obj["dt"] - ) - source = RickerSource( - name="source", - grid=model.grid, - f0=f0, - npoint=1, - time_range=time_range, - ) - source.coordinates.data[0, :] = np.array(model.domain_size) * 0.5 - source.coordinates.data[0, -1] = 0.0 - n_receivers = ctx.obj["n_receivers"] - total_receivers = n_receivers ** (len(model.shape) - 1) - receivers = Receiver( - name="receivers", - grid=model.grid, - npoint=total_receivers, - time_range=time_range, - ) - receivers_coords = np.meshgrid( - *( - np.linspace(start=0, stop=s, num=n_receivers + 2)[1:-1] - for s in model.domain_size[:-1] - ) - ) - for d in range(len(receivers_coords)): - receivers.coordinates.data[:, d] = receivers_coords[ - d - ].flatten() - receivers.coordinates.data[:, -1] = 0.0 - output_group = output_file.create_group(input_group_name) - for input_dataset_name, vp in input_group.items(): - model.vp = vp[()] - 
seismograms = model.solve( - source=source, receivers=receivers, time_range=time_range - ) - output_group.create_dataset( - input_dataset_name, data=seismograms - ) - bar.update(1) diff --git a/deepseismic/cli/velocity.py b/deepseismic/cli/velocity.py deleted file mode 100644 index 1c87c340..00000000 --- a/deepseismic/cli/velocity.py +++ /dev/null @@ -1,96 +0,0 @@ -from functools import partial -from itertools import islice -from typing import Tuple - -import click -import h5py - -from ..velocity import RoethTarantolaGenerator - -click.option = partial(click.option, show_default=True) - - -@click.group() -@click.argument("output", type=click.Path()) -@click.option( - "--append/--no-append", - default=False, - help="Whether to append to output file", -) -@click.option("-n", default=1, type=int, help="Number of simulations") -@click.option( - "-nx", - default=100, - type=int, - help="Number of grid points along the first dimension", -) -@click.option( - "-ny", - default=100, - type=int, - help="Number of grid points along the second dimension", -) -@click.option( - "-nz", type=int, help="Number of grid points along the third dimension" -) -@click.option("-s", "--seed", default=42, type=int, help="Random seed") -@click.pass_context -def vp( - ctx, - append: bool, - n: int, - nx: int, - ny: int, - nz: int, - output: str, - seed: int, -): - """Vp simulation""" - shape = (nx, ny) - if nz is not None: - shape += (nz,) - output_file = h5py.File(output, mode=("a" if append else "w")) - output_group = output_file.create_group( - str(max((int(x) for x in output_file.keys()), default=-1) + 1) - ) - ctx.obj["n"] = n - ctx.obj["output_file"] = output_file - ctx.obj["output_group"] = output_group - ctx.obj["seed"] = seed - ctx.obj["shape"] = shape - - -@vp.command() -@click.option("--n-layers", default=8, type=int, help="Number of layers") -@click.option( - "--initial-vp", - default=(1350.0, 1650.0), - type=(float, float), - help="Initial Vp (in km/s)", -) -@click.option( - "--vp-perturbation", - default=(-190.0, 570.0), - type=(float, float), - help="Per-layer Vp perturbation (in km/s)", -) -@click.pass_context -def rt( - ctx, - initial_vp: Tuple[float, float], - n_layers: int, - vp_perturbation: Tuple[float, float], -): - """Röth-Tarantola model""" - model = RoethTarantolaGenerator( - shape=ctx.obj["shape"], - seed=ctx.obj["seed"], - n_layers=n_layers, - initial_vp=initial_vp, - vp_perturbation=vp_perturbation, - ) - group = ctx.obj["output_group"] - with click.progressbar(length=ctx.obj["n"]) as bar: - for i, data in enumerate(islice(model.generate_many(), ctx.obj["n"])): - group.create_dataset(str(i), data=data, compression="gzip") - bar.update(1) diff --git a/deepseismic/forward/__init__.py b/deepseismic/forward/__init__.py deleted file mode 100644 index f9a9083f..00000000 --- a/deepseismic/forward/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -from .models import Model, VelocityModel -from .sources import Receiver, RickerSource, WaveletSource -from .time import TimeAxis -from .types import Kernel - -__all__ = [ - "Kernel", - "Model", - "Receiver", - "RickerSource", - "TimeAxis", - "VelocityModel", - "WaveletSource", -] diff --git a/deepseismic/forward/models.py b/deepseismic/forward/models.py deleted file mode 100644 index f07b7a1c..00000000 --- a/deepseismic/forward/models.py +++ /dev/null @@ -1,162 +0,0 @@ -from typing import Optional, Tuple, Union - -import numpy as np -from devito import ( - Constant, - Eq, - Function, - Grid, - Operator, - SubDomain, - TimeFunction, - logger, - solve, -) - -from 
.sources import PointSource -from .subdomains import PhysicalDomain -from .time import TimeAxis -from .types import Kernel - -logger.set_log_level("WARNING") - - -class Model(object): - def __init__( - self, - shape: Tuple[int, ...], - origin: Tuple[float, ...], - spacing: Tuple[float, ...], - n_pml: Optional[int] = 0, - dtype: Optional[type] = np.float32, - subdomains: Optional[Tuple[SubDomain]] = (), - ): - shape = tuple(int(x) for x in shape) - origin = tuple(dtype(x) for x in origin) - n_pml = int(n_pml) - subdomains = tuple(subdomains) + (PhysicalDomain(n_pml),) - shape_pml = tuple(x + 2 * n_pml for x in shape) - extent_pml = tuple(s * (d - 1) for s, d in zip(spacing, shape_pml)) - origin_pml = tuple( - dtype(o - s * n_pml) for o, s in zip(origin, spacing) - ) - self.grid = Grid( - shape=shape_pml, - extent=extent_pml, - origin=origin_pml, - dtype=dtype, - subdomains=subdomains, - ) - self.n_pml = n_pml - self.pml = Function(name="pml", grid=self.grid) - pml_data = np.pad( - np.zeros(shape, dtype=dtype), - [(n_pml,) * 2 for _ in range(self.pml.ndim)], - mode="edge", - ) - pml_coef = 1.5 * np.log(1000.0) / 40.0 - for d in range(self.pml.ndim): - for i in range(n_pml): - pos = np.abs((n_pml - i + 1) / n_pml) - val = pml_coef * (pos - np.sin(2 * np.pi * pos) / (2 * np.pi)) - idx = [slice(0, x) for x in pml_data.shape] - idx[d] = slice(i, i + 1) - pml_data[tuple(idx)] += val / self.grid.spacing[d] - idx[d] = slice( - pml_data.shape[d] - i, pml_data.shape[d] - i + 1 - ) - pml_data[tuple(idx)] += val / self.grid.spacing[d] - pml_data = np.pad( - pml_data, - [(i.left, i.right) for i in self.pml._size_halo], - mode="edge", - ) - self.pml.data_with_halo[:] = pml_data - self.shape = shape - - @property - def domain_size(self) -> Tuple[float, ...]: - return tuple((d - 1) * s for d, s in zip(self.shape, self.spacing)) - - @property - def dtype(self) -> type: - return self.grid.dtype - - @property - def spacing(self): - return self.grid.spacing - - @property - def spacing_map(self): - return self.grid.spacing_map - - @property - def time_spacing(self): - return self.grid.stepping_dim.spacing - - -class VelocityModel(Model): - def __init__( - self, - shape: Tuple[int, ...], - origin: Tuple[float, ...], - spacing: Tuple[float, ...], - vp: Union[float, np.ndarray], - space_order: Optional[int] = None, - n_pml: Optional[int] = 0, - dtype: Optional[type] = np.float32, - subdomains: Optional[Tuple[SubDomain]] = (), - ): - super().__init__(shape, origin, spacing, n_pml, dtype, subdomains) - if isinstance(vp, np.ndarray): - assert space_order is not None - self.m = Function( - name="m", grid=self.grid, space_order=int(space_order) - ) - else: - self.m = Constant(name="m", value=1.0 / float(vp) ** 2.0) - self.vp = vp - - @property - def vp(self) -> Union[float, np.ndarray]: - return self._vp - - @vp.setter - def vp(self, vp: Union[float, np.ndarray]) -> None: - self._vp = vp - if isinstance(vp, np.ndarray): - pad_widths = [ - (self.n_pml + i.left, self.n_pml + i.right) - for i in self.m._size_halo - ] - self.m.data_with_halo[:] = np.pad( - 1.0 / self.vp ** 2.0, pad_widths, mode="edge" - ) - else: - self.m.data = 1.0 / float(vp) ** 2.0 - - def solve( - self, - source: PointSource, - receivers: PointSource, - time_range: TimeAxis, - space_order: Optional[int] = 4, - kernel: Optional[Kernel] = Kernel.OT2, - ) -> np.ndarray: - assert isinstance(kernel, Kernel) - u = TimeFunction( - name="u", grid=self.grid, time_order=2, space_order=space_order - ) - H = u.laplace - if kernel is Kernel.OT4: - H += 
self.time_spacing ** 2 / 12 * u.laplace2(1 / self.m) - eq = Eq( - u.forward, solve(self.m * u.dt2 - H + self.pml * u.dt, u.forward) - ) - src_term = source.inject( - field=u.forward, expr=source * self.time_spacing ** 2 / self.m - ) - rec_term = receivers.interpolate(expr=u) - op = Operator([eq] + src_term + rec_term, subs=self.spacing_map) - op(time=time_range.num - 1, dt=time_range.step) - return receivers.data diff --git a/deepseismic/forward/sources.py b/deepseismic/forward/sources.py deleted file mode 100644 index 5a0470e2..00000000 --- a/deepseismic/forward/sources.py +++ /dev/null @@ -1,132 +0,0 @@ -from typing import Optional - -import numpy as np -import sympy -from devito.types import Dimension, SparseTimeFunction -from devito.types.basic import _SymbolCache -from scipy import interpolate - -from .time import TimeAxis - - -class PointSource(SparseTimeFunction): - def __new__(cls, *args, **kwargs): - if cls in _SymbolCache: - options = kwargs.get("options", {}) - obj = sympy.Function.__new__(cls, *args, **options) - obj._cached_init() - return obj - name = kwargs.pop("name") - grid = kwargs.pop("grid") - time_range = kwargs.pop("time_range") - time_order = kwargs.pop("time_order", 2) - p_dim = kwargs.pop("dimension", Dimension(name="p_%s" % name)) - npoint = kwargs.pop("npoint", None) - coordinates = kwargs.pop( - "coordinates", kwargs.pop("coordinates_data", None) - ) - if npoint is None: - assert ( - coordinates is not None - ), "Either `npoint` or `coordinates` must be provided" - npoint = coordinates.shape[0] - obj = SparseTimeFunction.__new__( - cls, - name=name, - grid=grid, - dimensions=(grid.time_dim, p_dim), - npoint=npoint, - nt=time_range.num, - time_order=time_order, - coordinates=coordinates, - **kwargs - ) - obj._time_range = time_range - data = kwargs.get("data") - if data is not None: - obj.data[:] = data - return obj - - @property - def time_range(self) -> TimeAxis: - return self._time_range - - @property - def time_values(self) -> np.ndarray: - return self._time_range.time_values - - def resample( - self, - dt: Optional[float] = None, - num: Optional[int] = None, - rtol: Optional[float] = 1.0e-5, - order: Optional[int] = 3, - ): - assert (dt is not None) ^ ( - num is not None - ), "Exactly one of `dt` or `num` must be provided" - start = self._time_range.start - stop = self._time_range.stop - dt0 = self._time_range.step - if dt is not None: - new_time_range = TimeAxis(start=start, stop=stop, step=dt) - else: - new_time_range = TimeAxis(start=start, stop=stop, num=num) - dt = new_time_range.step - if np.isclose(dt0, dt, rtol=rtol): - return self - n_traces = self.data.shape[1] - new_traces = np.zeros( - (new_time_range.num, n_traces), dtype=self.data.dtype - ) - for j in range(n_traces): - tck = interpolate.splrep( - self._time_range.time_values, self.data[:, j], k=order - ) - new_traces[:, j] = interpolate.splev( - new_time_range.time_values, tck - ) - return PointSource( - name=self.name, - grid=self.grid, - time_range=new_time_range, - coordinates=self.coordinates.data, - data=new_traces, - ) - - _pickle_kwargs = SparseTimeFunction._pickle_kwargs + ["time_range"] - _pickle_kwargs.remove("nt") # Inferred from time_range - - -class Receiver(PointSource): - pass - - -class WaveletSource(PointSource): - def __new__(cls, *args, **kwargs): - if cls in _SymbolCache: - options = kwargs.get("options", {}) - obj = sympy.Function.__new__(cls, *args, **options) - obj._cached_init() - return obj - npoint = kwargs.pop("npoint", 1) - obj = PointSource.__new__(cls, 
npoint=npoint, **kwargs) - obj.f0 = kwargs.get("f0") - for p in range(npoint): - obj.data[:, p] = obj.wavelet(obj.f0, obj.time_values) - return obj - - def __init__(self, *args, **kwargs): - if not self._cached(): - super(WaveletSource, self).__init__(*args, **kwargs) - - def wavelet(self, f0: float, t: np.ndarray) -> np.ndarray: - raise NotImplementedError - - _pickle_kwargs = PointSource._pickle_kwargs + ["f0"] - - -class RickerSource(WaveletSource): - def wavelet(self, f0: float, t: np.ndarray) -> np.ndarray: - r = np.pi * f0 * (t - 1.0 / f0) - return (1.0 - 2.0 * r ** 2.0) * np.exp(-r ** 2.0) diff --git a/deepseismic/forward/subdomains.py b/deepseismic/forward/subdomains.py deleted file mode 100644 index 2ed6cedb..00000000 --- a/deepseismic/forward/subdomains.py +++ /dev/null @@ -1,16 +0,0 @@ -from typing import Dict, Iterable, Tuple - -from devito import Dimension, SubDomain - - -class PhysicalDomain(SubDomain): - name = "physical_domain" - - def __init__(self, n_pml: int): - super().__init__() - self.n_pml = n_pml - - def define( - self, dimensions: Iterable[Dimension] - ) -> Dict[Dimension, Tuple[str, int, int]]: - return {d: ("middle", self.n_pml, self.n_pml) for d in dimensions} diff --git a/deepseismic/forward/time.py b/deepseismic/forward/time.py deleted file mode 100644 index d3dfc00d..00000000 --- a/deepseismic/forward/time.py +++ /dev/null @@ -1,34 +0,0 @@ -from typing import Optional - -import numpy as np - - -class TimeAxis(object): - def __init__( - self, - start: Optional[float] = None, - stop: Optional[float] = None, - num: Optional[int] = None, - step: Optional[float] = None, - dtype: Optional[type] = np.float32, - ): - if start is None: - start = step * (1 - num) + stop - elif stop is None: - stop = step * (num - 1) + start - elif num is None: - num = int(np.ceil((stop - start + step) / step)) - stop = step * (num - 1) + start - elif step is None: - step = (stop - start) / (num - 1) - else: - raise ValueError - self.start = start - self.stop = stop - self.num = num - self.step = step - self.dtype = dtype - - @property - def time_values(self) -> np.ndarray: - return np.linspace(self.start, self.stop, self.num, dtype=self.dtype) diff --git a/deepseismic/forward/types.py b/deepseismic/forward/types.py deleted file mode 100644 index 772f67b7..00000000 --- a/deepseismic/forward/types.py +++ /dev/null @@ -1,6 +0,0 @@ -from enum import Enum, auto - - -class Kernel(Enum): - OT2 = auto() - OT4 = auto() diff --git a/deepseismic/velocity/__init__.py b/deepseismic/velocity/__init__.py deleted file mode 100644 index 98225180..00000000 --- a/deepseismic/velocity/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .generator import Generator -from .roeth_tarantola import RoethTarantolaGenerator - -__all__ = ["Generator", "RoethTarantolaGenerator"] diff --git a/deepseismic/velocity/generator.py b/deepseismic/velocity/generator.py deleted file mode 100644 index ddc2eb4a..00000000 --- a/deepseismic/velocity/generator.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import Optional, Tuple - -import numpy as np - - -class Generator(object): - def __init__( - self, - shape: Tuple[int, ...], - dtype: Optional[type] = np.float32, - seed: Optional[int] = None, - ): - self.shape = shape - self.dtype = dtype - self._prng = np.random.RandomState(seed) - - def generate(self) -> np.ndarray: - raise NotImplementedError - - def generate_many(self) -> np.ndarray: - while True: - yield self.generate() diff --git a/deepseismic/velocity/roeth_tarantola.py b/deepseismic/velocity/roeth_tarantola.py deleted file 
mode 100644
index 6c3c0cc4..00000000
--- a/deepseismic/velocity/roeth_tarantola.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from typing import Optional, Tuple
-
-import numpy as np
-
-from .generator import Generator
-
-
-class RoethTarantolaGenerator(Generator):
-    def __init__(
-        self,
-        shape: Tuple[int, ...],
-        dtype: Optional[type] = np.float32,
-        seed: Optional[int] = None,
-        depth_dim: Optional[int] = -1,
-        n_layers: Optional[int] = 8,
-        initial_vp: Optional[Tuple[float, float]] = (1.35, 1.65),
-        vp_perturbation: Optional[Tuple[float, float]] = (-0.19, 0.57),
-    ):
-        super().__init__(shape, dtype, seed)
-        self.depth_dim = depth_dim
-        self.n_layers = n_layers
-        self.initial_vp = initial_vp
-        self.vp_perturbation = vp_perturbation
-
-    def generate(self) -> np.ndarray:
-        vp = np.zeros(self.shape, dtype=self.dtype)
-        dim = self.depth_dim
-        layer_idx = np.round(
-            np.linspace(0, self.shape[dim], self.n_layers + 1)
-        ).astype(np.int)
-        vp_idx = [slice(0, x) for x in vp.shape]
-        layer_vp = None
-        for i in range(self.n_layers):
-            vp_idx[dim] = slice(layer_idx[i], layer_idx[i + 1])
-            layer_vp = (
-                self._prng.uniform(*self.initial_vp)
-                if layer_vp is None
-                else layer_vp + self._prng.uniform(*self.vp_perturbation)
-            )
-            vp[tuple(vp_idx)] = layer_vp
-        return vp
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 00000000..f85ae6a2
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,6 @@
+# Documentation
+
+To set up the documentation, first install the dependencies of the full environment by following the instructions in [SETUP.md](../SETUP.md).
+
+TODO: add more text
+
diff --git a/environment/anaconda/local/environment.yml b/environment/anaconda/local/environment.yml
new file mode 100644
index 00000000..4eebd0b3
--- /dev/null
+++ b/environment/anaconda/local/environment.yml
@@ -0,0 +1,38 @@
+name: seismic-interpretation
+channels:
+  - conda-forge
+  - pytorch
+dependencies:
+  - python=3.6.7
+  - pip
+  - pytorch==1.3.1
+  - cudatoolkit==10.1.243
+  - jupyter
+  - ipykernel
+  - torchvision==0.4.2
+  - pandas==0.25.3
+  - opencv==4.1.2
+  - scikit-learn==0.21.3
+  - tensorflow==2.0
+  - opt-einsum>=2.3.2
+  - tqdm==4.39.0
+  - itkwidgets==0.23.1
+  - pytest
+  - papermill>=1.0.1
+  - pip:
+    - segyio==1.8.8
+    - pytorch-ignite==0.3.0.dev20191105 # pre-release until stable available
+    - fire==0.2.1
+    - toolz==0.10.0
+    - tabulate==0.8.2
+    - Jinja2==2.10.3
+    - gitpython==3.0.5
+    - tensorboard==2.0.1
+    - tensorboardx==1.9
+    - invoke==1.3.0
+    - yacs==0.1.6
+    - albumentations==0.4.3
+    - black
+    - pylint
+    - scipy==1.1.0
+    - jupytext==1.3.0
diff --git a/environment/docker/apex/Makefile b/environment/docker/apex/Makefile
new file mode 100644
index 00000000..57d2dabc
--- /dev/null
+++ b/environment/docker/apex/Makefile
@@ -0,0 +1,51 @@
+define PROJECT_HELP_MSG
+Makefile to control project aml_dist
+Usage:
+    help             show this message
+    build            build docker image to use as control plane
+    bash             run bash inside running docker container
+    stop             stop running docker container
+endef
+export PROJECT_HELP_MSG
+PWD:=$(shell pwd)
+PORT:=9999
+TBOARD_PORT:=6006
+IMAGE_NAME:=ignite_image
+NAME:=ignite_container # Name of running container
+DATA:=/mnt
+
+BASEDIR:=$(shell dirname $(shell dirname ${PWD}))
+
+local_code_volume:=-v $(BASEDIR):/workspace
+volumes:=-v $(DATA):/data \
+	-v ${HOME}/.bash_history:/root/.bash_history
+
+
+help:
+	echo "$$PROJECT_HELP_MSG" | less
+
+build:
+	docker build -t $(IMAGE_NAME) -f dockerfile .
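+
+# Typical workflow (an illustrative sketch; the targets are defined in this file):
+#   make build   # build the $(IMAGE_NAME) image
+#   make run     # start the container as a daemon and open a shell in it
+#   make stop    # stop and remove the container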
+
+run:
+	# Start docker running as daemon
+	docker run $(local_code_volume) $(volumes) $(setup_environment_file) \
+	--shm-size="4g" \
+	--runtime=nvidia \
+	--name $(NAME) \
+	-d \
+	-v /var/run/docker.sock:/var/run/docker.sock \
+	-e HIST_FILE=/root/.bash_history \
+	-it $(IMAGE_NAME)
+
+	docker exec -it $(NAME) bash
+
+
+bash:
+	docker exec -it $(NAME) bash
+
+stop:
+	docker stop $(NAME)
+	docker rm $(NAME)
+
+.PHONY: help build run bash stop
\ No newline at end of file
diff --git a/environment/docker/apex/dockerfile b/environment/docker/apex/dockerfile
new file mode 100644
index 00000000..3becd3c4
--- /dev/null
+++ b/environment/docker/apex/dockerfile
@@ -0,0 +1,16 @@
+FROM pytorch/pytorch:nightly-devel-cuda10.0-cudnn7
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    libglib2.0-0 \
+    libsm6 \
+    libxext6 \
+    libxrender-dev
+
+RUN git clone https://github.com/NVIDIA/apex && \
+    cd apex && \
+    pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./
+
+RUN pip install toolz pytorch-ignite torchvision pandas opencv-python fire tensorboardx scikit-learn yacs
+
+WORKDIR /workspace
+CMD /bin/bash
\ No newline at end of file
diff --git a/environment/docker/horovod/Makefile b/environment/docker/horovod/Makefile
new file mode 100644
index 00000000..a423b726
--- /dev/null
+++ b/environment/docker/horovod/Makefile
@@ -0,0 +1,56 @@
+define PROJECT_HELP_MSG
+Makefile to control project aml_dist
+Usage:
+    help             show this message
+    build            build docker image to use as control plane
+    bash             run bash inside running docker container
+    stop             stop running docker container
+endef
+export PROJECT_HELP_MSG
+PWD:=$(shell pwd)
+PORT:=9999
+TBOARD_PORT:=6006
+IMAGE_NAME:=horovod_image
+NAME:=horovod_container # Name of running container
+DATA:=/mnt
+
+BASEDIR:=$(shell dirname $(shell dirname $(shell dirname ${PWD})))
+REPODIR:=$(shell dirname ${BASEDIR})
+
+local_code_volume:=-v $(BASEDIR):/workspace
+volumes:=-v $(DATA):/data \
+	-v ${HOME}/.bash_history:/root/.bash_history
+
+help:
+	echo "$$PROJECT_HELP_MSG" | less
+
+build:
+	docker build -t $(IMAGE_NAME) -f dockerfile ${REPODIR}
+
+run:
+	@echo ${BASEDIR}
+	# Start docker running as daemon
+	docker run $(local_code_volume) $(volumes) $(setup_environment_file) \
+	--privileged \
+	--shm-size="4g" \
+	--runtime=nvidia \
+	--name $(NAME) \
+	-d \
+	-v /var/run/docker.sock:/var/run/docker.sock \
+	-e HIST_FILE=/root/.bash_history \
+	-it $(IMAGE_NAME)
+
+	docker exec -it $(NAME) bash
+
+
+run-horovod:
+	docker exec -it $(NAME) mpirun -np 2 -bind-to none -map-by slot -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH -mca pml ob1 -mca btl ^openib python train_horovod.py
+
+bash:
+	docker exec -it $(NAME) bash
+
+stop:
+	docker stop $(NAME)
+	docker rm $(NAME)
+
+.PHONY: help build run bash stop
\ No newline at end of file
diff --git a/environment/docker/horovod/dockerfile b/environment/docker/horovod/dockerfile
new file mode 100644
index 00000000..0e12f455
--- /dev/null
+++ b/environment/docker/horovod/dockerfile
@@ -0,0 +1,130 @@
+FROM nvidia/cuda:10.0-devel-ubuntu18.04
+# Based on the default Horovod image
+
+ENV PYTORCH_VERSION=1.1.0
+ENV TORCHVISION_VERSION=0.3.0
+ENV CUDNN_VERSION=7.6.0.64-1+cuda10.0
+ENV NCCL_VERSION=2.4.7-1+cuda10.0
+
+# Python 2.7 or 3.6 is supported by Ubuntu Bionic out of the box
+ARG python=3.6
+ENV PYTHON_VERSION=${python}
+
+# Set default shell to /bin/bash
+SHELL ["/bin/bash", "-cu"]
+
+# We need gcc-4.9 to build plugins for TensorFlow & PyTorch, which is only available in Ubuntu Xenial
+RUN echo deb http://archive.ubuntu.com/ubuntu xenial main universe | tee -a /etc/apt/sources.list +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update && apt-get install -y --no-install-recommends --allow-change-held-packages --allow-downgrades \ + build-essential \ + cmake \ + gcc-4.9 \ + g++-4.9 \ + gcc-4.9-base \ + software-properties-common \ + git \ + curl \ + wget \ + ca-certificates \ + libcudnn7=${CUDNN_VERSION} \ + libnccl2=${NCCL_VERSION} \ + libnccl-dev=${NCCL_VERSION} \ + libjpeg-dev \ + libpng-dev \ + python${PYTHON_VERSION} \ + python${PYTHON_VERSION}-dev \ + librdmacm1 \ + libibverbs1 \ + ibverbs-utils\ + ibutils \ + net-tools \ + ibverbs-providers \ + libglib2.0-0 \ + libsm6 \ + libxext6 \ + libxrender-dev + + +RUN if [[ "${PYTHON_VERSION}" == "3.6" ]]; then \ + apt-get install -y python${PYTHON_VERSION}-distutils; \ + fi +RUN ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python + +RUN curl -O https://bootstrap.pypa.io/get-pip.py && \ + python get-pip.py && \ + rm get-pip.py + +# Install PyTorch +RUN pip install future typing +RUN pip install numpy +RUN pip install https://download.pytorch.org/whl/cu100/torch-${PYTORCH_VERSION}-$(python -c "import wheel.pep425tags as w; print('-'.join(w.get_supported()[0]))").whl \ + https://download.pytorch.org/whl/cu100/torchvision-${TORCHVISION_VERSION}-$(python -c "import wheel.pep425tags as w; print('-'.join(w.get_supported()[0]))").whl +RUN pip install --no-cache-dir torchvision h5py toolz pytorch-ignite pandas opencv-python fire tensorboardx scikit-learn tqdm yacs albumentations gitpython +COPY ComputerVision_fork/contrib /contrib +RUN pip install -e /contrib +COPY DeepSeismic /DeepSeismic +RUN pip install -e DeepSeismic/interpretation + +# Install Open MPI +RUN mkdir /tmp/openmpi && \ + cd /tmp/openmpi && \ + wget https://www.open-mpi.org/software/ompi/v4.0/downloads/openmpi-4.0.0.tar.gz && \ + tar zxf openmpi-4.0.0.tar.gz && \ + cd openmpi-4.0.0 && \ + ./configure --enable-orterun-prefix-by-default && \ + make -j $(nproc) all && \ + make install && \ + ldconfig && \ + rm -rf /tmp/openmpi + +# Pin GCC to 4.9 (priority 200) to compile correctly against TensorFlow, PyTorch, and MXNet. +# Backup existing GCC installation as priority 100, so that it can be recovered later. 
+RUN update-alternatives --install /usr/bin/gcc gcc $(readlink -f $(which gcc)) 100 && \ + update-alternatives --install /usr/bin/x86_64-linux-gnu-gcc x86_64-linux-gnu-gcc $(readlink -f $(which gcc)) 100 && \ + update-alternatives --install /usr/bin/g++ g++ $(readlink -f $(which g++)) 100 && \ + update-alternatives --install /usr/bin/x86_64-linux-gnu-g++ x86_64-linux-gnu-g++ $(readlink -f $(which g++)) 100 +RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.9 200 && \ + update-alternatives --install /usr/bin/x86_64-linux-gnu-gcc x86_64-linux-gnu-gcc /usr/bin/gcc-4.9 200 && \ + update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.9 200 && \ + update-alternatives --install /usr/bin/x86_64-linux-gnu-g++ x86_64-linux-gnu-g++ /usr/bin/g++-4.9 200 + + +# Install Horovod, temporarily using CUDA stubs +RUN ldconfig /usr/local/cuda/targets/x86_64-linux/lib/stubs && \ + HOROVOD_GPU_ALLREDUCE=NCCL HOROVOD_WITH_PYTORCH=1 pip install --no-cache-dir horovod && \ +ldconfig + +# Remove GCC pinning +RUN update-alternatives --remove gcc /usr/bin/gcc-4.9 && \ + update-alternatives --remove x86_64-linux-gnu-gcc /usr/bin/gcc-4.9 && \ + update-alternatives --remove g++ /usr/bin/g++-4.9 && \ + update-alternatives --remove x86_64-linux-gnu-g++ /usr/bin/g++-4.9 + +# Create a wrapper for OpenMPI to allow running as root by default +RUN mv /usr/local/bin/mpirun /usr/local/bin/mpirun.real && \ + echo '#!/bin/bash' > /usr/local/bin/mpirun && \ + echo 'mpirun.real --allow-run-as-root "$@"' >> /usr/local/bin/mpirun && \ + chmod a+x /usr/local/bin/mpirun + +# Configure OpenMPI to run good defaults: +# --bind-to none --map-by slot --mca btl_tcp_if_exclude lo,docker0 +RUN echo "hwloc_base_binding_policy = none" >> /usr/local/etc/openmpi-mca-params.conf && \ + echo "rmaps_base_mapping_policy = slot" >> /usr/local/etc/openmpi-mca-params.conf + # echo "btl_tcp_if_exclude = lo,docker0" >> /usr/local/etc/openmpi-mca-params.conf + +# Set default NCCL parameters +RUN echo NCCL_DEBUG=INFO >> /etc/nccl.conf && \ + echo NCCL_SOCKET_IFNAME=^docker0 >> /etc/nccl.conf + +# Install OpenSSH for MPI to communicate between containers +RUN apt-get install -y --no-install-recommends openssh-client openssh-server && \ + mkdir -p /var/run/sshd + +# Allow OpenSSH to talk to containers without asking for confirmation +RUN cat /etc/ssh/ssh_config | grep -v StrictHostKeyChecking > /etc/ssh/ssh_config.new && \ + echo " StrictHostKeyChecking no" >> /etc/ssh/ssh_config.new && \ + mv /etc/ssh/ssh_config.new /etc/ssh/ssh_config + +WORKDIR /workspace +CMD /bin/bash diff --git a/examples/interpretation/README.md b/examples/interpretation/README.md new file mode 100644 index 00000000..7f151c60 --- /dev/null +++ b/examples/interpretation/README.md @@ -0,0 +1 @@ +Description of examples diff --git a/examples/interpretation/notebooks/F3_block_training_and_evaluation_local.ipynb b/examples/interpretation/notebooks/F3_block_training_and_evaluation_local.ipynb new file mode 100644 index 00000000..320f40a3 --- /dev/null +++ b/examples/interpretation/notebooks/F3_block_training_and_evaluation_local.ipynb @@ -0,0 +1,1024 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Copyright (c) Microsoft Corporation.\n", + "\n", + "Licensed under the MIT License." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Model training and evaluation on F3 Netherlands dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Seismic interpretation, also referred to as facies classification, is a task of determining types of rock in the earth’s subsurface, given seismic data. Seismic interpretation is used as a standard approach for determining precise locations of oil deposits for drilling, therefore reducing risks and potential losses. In recent years, there has been a great interest in using fully-supervised deep learning models for seismic interpretation. \n", + "\n", + "In this notebook, we demonstrate how to train a deep neural network for facies prediction using F3 Netherlands dataset. The F3 block is located in the North Sea off the shores of Netherlands. The dataset contains 6 classes (facies or lithostratigraphic units), all of which are of varying thickness (class imbalance). Processed data is available in numpy format as a `401 x 701 x 255` array. The processed F3 data is made available by [Alaudah et al. 2019](https://github.com/olivesgatech/facies_classification_benchmark). " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Environment setup\n", + "\n", + "To set up the conda environment, please follow the instructions in the top-level [README.md](../../../README.md) file.\n", + "\n", + "__Note__: To register the conda environment in Jupyter, run:\n", + "`python -m ipykernel install --user --name envname`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Library imports" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's load required libraries." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import logging\n", + "import logging.config\n", + "from os import path\n", + "import random\n", + "import matplotlib.pyplot as plt\n", + "plt.rcParams.update({\"font.size\": 16})\n", + "\n", + "import yacs.config\n", + "\n", + "import cv2\n", + "import numpy as np\n", + "import torch\n", + "from albumentations import Compose, HorizontalFlip, Normalize, PadIfNeeded, Resize\n", + "from ignite.contrib.handlers import CosineAnnealingScheduler\n", + "from ignite.handlers import ModelCheckpoint\n", + "from ignite.engine import Events\n", + "from ignite.metrics import Loss\n", + "from ignite.utils import convert_tensor\n", + "from toolz import compose, take\n", + "from torch.utils import data\n", + "\n", + "from cv_lib.utils import load_log_configuration\n", + "from cv_lib.event_handlers import SnapshotHandler, logging_handlers\n", + "from cv_lib.segmentation import models\n", + "from cv_lib.segmentation.dutchf3.engine import create_supervised_trainer\n", + "\n", + "from cv_lib.segmentation.dutchf3.utils import (\n", + " current_datetime,\n", + " generate_path,\n", + " git_branch,\n", + " git_hash,\n", + " np_to_tb,\n", + ")\n", + "\n", + "from deepseismic_interpretation.dutchf3.data import (\n", + " get_patch_loader,\n", + " decode_segmap,\n", + " get_test_loader,\n", + ")\n", + "\n", + "from itkwidgets import view\n", + "\n", + "from utilities import (\n", + " plot_aline,\n", + " prepare_batch,\n", + " patch_label_2d,\n", + " compose_processing_pipeline,\n", + " output_processing_pipeline,\n", + " write_section_file,\n", + " runningScore,\n", + ")\n", + "\n", + "# set device\n", + "device = \"cpu\"\n", + "if torch.cuda.is_available():\n", + " device = 
\"cuda\"\n", + "\n", + "# number of images to score\n", + "N_EVALUATE = 20\n", + "# experiment configuration file\n", + "CONFIG_FILE = \"./configs/patch_deconvnet_skip.yaml\"\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Data download and preparation\n", + "\n", + "To download and prepare the F3 data set, please follow the instructions in the top-level [README](../../../README.md) file. Once you've downloaded and prepared the data set, you'll find your files in the following directory tree:\n", + "\n", + "```\n", + "data\n", + "├── splits\n", + "├── test_once\n", + "│ ├── test1_labels.npy\n", + "│ ├── test1_seismic.npy\n", + "│ ├── test2_labels.npy\n", + "│ └── test2_seismic.npy\n", + "└── train\n", + " ├── train_labels.npy\n", + " └── train_seismic.npy\n", + "```\n", + "\n", + "We recommend saving the data under `$HOME/data/dutchf3` since this notebook will use that location as the data root. Otherwise, modify the `DATASET.ROOT` field in the configuration file, described next. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Experiment configuration file" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We use configuration files to specify experiment configuration, such as hyperparameters used in training and evaluation, as well as other experiment settings. We provide several configuration files for this notebook, under `./configs`, mainly differing in the DNN architecture used for defining the model.\n", + "\n", + "Modify the `CONFIG_FILE` variable above if you would like to run the experiment using a different configuration file." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Configuration loaded. Please check that the DATASET.ROOT:/data/dutchf3 points to your data location.\n", + "To modify any of the options, please edit the configuration file ./configs/patch_deconvnet_skip.yaml and reload. \n", + "\n", + "CUDNN:\n", + " BENCHMARK: True\n", + " DETERMINISTIC: False\n", + " ENABLED: True\n", + "DATASET:\n", + " CLASS_WEIGHTS: [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852]\n", + " NUM_CLASSES: 6\n", + " ROOT: /data/dutchf3\n", + "GPUS: (0,)\n", + "LOG_CONFIG: logging.conf\n", + "LOG_DIR: log\n", + "MODEL:\n", + " IN_CHANNELS: 1\n", + " NAME: patch_deconvnet_skip\n", + "OUTPUT_DIR: output\n", + "PRINT_FREQ: 50\n", + "SEED: 2019\n", + "TEST:\n", + " CROSSLINE: True\n", + " INLINE: True\n", + " MODEL_PATH: /data/home/mat/repos/DeepSeismic/examples/interpretation/notebooks/output/models/model_patch_deconvnet_skip_2.pth\n", + " POST_PROCESSING:\n", + " CROP_PIXELS: 0\n", + " SIZE: 99\n", + " SPLIT: test1\n", + " TEST_STRIDE: 10\n", + "TRAIN:\n", + " AUGMENTATION: True\n", + " AUGMENTATIONS:\n", + " PAD:\n", + " HEIGHT: 99\n", + " WIDTH: 99\n", + " RESIZE:\n", + " HEIGHT: 99\n", + " WIDTH: 99\n", + " BATCH_SIZE_PER_GPU: 64\n", + " BEGIN_EPOCH: 0\n", + " DEPTH: none\n", + " END_EPOCH: 100\n", + " MAX_LR: 0.02\n", + " MEAN: 0.0009997\n", + " MIN_LR: 0.001\n", + " MODEL_DIR: models\n", + " MOMENTUM: 0.9\n", + " PATCH_SIZE: 99\n", + " SNAPSHOTS: 5\n", + " STD: 0.20977\n", + " STRIDE: 50\n", + " WEIGHT_DECAY: 0.0001\n", + "VALIDATION:\n", + " BATCH_SIZE_PER_GPU: 512\n", + "WORKERS: 4\n" + ] + } + ], + "source": [ + "with open(CONFIG_FILE, \"rt\") as f_read:\n", + " config = yacs.config.load_cfg(f_read)\n", + "\n", + "print(f'Configuration loaded. 
Please check that the DATASET.ROOT:{config.DATASET.ROOT} points to your data location.')\n", + "print(f'To modify any of the options, please edit the configuration file {CONFIG_FILE} and reload. \\n')\n", + "print(config)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Override parameters in case the notebook is run via papermill." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "tags": [ + "parameters" + ] + }, + "outputs": [], + "source": [ + "# Maximum number of batches to run in training or validation\n", + "# Setting to None will run the whole dataset\n", + "# A small value such as 3 is useful for integration tests,\n", + "# when you only want to check that things run and don't want\n", + "# to go through the whole dataset\n", + "max_iterations = None\n", + "# The number of epochs to run in training\n", + "max_epochs = config.TRAIN.END_EPOCH\n", + "max_snapshots = config.TRAIN.SNAPSHOTS\n", + "dataset_root = config.DATASET.ROOT" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# reduce the number of test images if running a dummy model\n", + "if max_epochs < 2:\n", + " N_EVALUATE = 3" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## F3 data set " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's visualize a few sections of the F3 data set. The processed F3 data set is stored as a 3D numpy array, so we can view slices of the data along the inline and crossline directions. " + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Number of inline slices: 401\n", + "Number of crossline slices: 701\n", + "Depth dimension : 255\n" + ] + } + ], + "source": [ + "# Load training data and labels\n", + "train_seismic = np.load(path.join(dataset_root, \"train/train_seismic.npy\"))\n", + "train_labels = np.load(path.join(dataset_root, \"train/train_labels.npy\"))\n", + "\n", + "print(f\"Number of inline slices: {train_seismic.shape[0]}\")\n", + "print(f\"Number of crossline slices: {train_seismic.shape[1]}\")\n", + "print(f\"Depth dimension : {train_seismic.shape[2]}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "1aadea98bda8458fbc03782571b1d4b7", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Viewer(geometries=[], gradient_opacity=0.22, point_sets=[], rendered_image=" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "idx = 100\n", + "x_in = train_seismic[idx, :, :].swapaxes(0, 1)\n", + "x_inl = train_labels[idx, :, :].swapaxes(0, 1)\n", + "\n", + "plot_aline(x_in, x_inl, xlabel=\"inline\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's plot a __crossline__ slice."
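+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The crossline view in the next cell is produced the same way as the inline view above, just slicing along the second axis of the volume instead of the first. A minimal sketch, assuming the `train_seismic`, `train_labels` and `plot_aline` names defined earlier in this notebook (the exact source of the next cell may differ):\n", + "\n", + "```python\n", + "idx = 100\n", + "# fix a crossline index and swap axes so depth runs along the vertical axis\n", + "x_cr = train_seismic[:, idx, :].swapaxes(0, 1)\n", + "x_crl = train_labels[:, idx, :].swapaxes(0, 1)\n", + "\n", + "plot_aline(x_cr, x_crl, xlabel=\"crossline\")\n", + "```"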
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUg... [long base64-encoded PNG omitted here: a matplotlib 3.1.2 rendering of the crossline seismic section and its facies labels]
Qai+X6aS7FvnybdwkxgD7ecVxuTP7wvrx6N9Hj8+NqzPTea7sefR/wjfM/wnvuqiyr+jt8v3k7H7vu1rv9sd/ivLpVziPj8yP/4e94BzV+7fGO1yNxLfIEb+CL6Je8uNolEolEIvGS4jv4D/yld7vMl50EAT6imXDjAuZvAvBNAHD32Z/NiAv2W6VGYw0AbwuB0QFqBGr6c1x4b4y+sVyzMVDHtVQYtMmN+SjgS0jWU9UIMkOjETjW0YyGfTSVOoG1fGrjdy7BhrA6HmI0UtO2FrmNtZEK0Df236mH9bPdu8u1ZOWufUjh3moI9SplS70Y1Ai88U0CgBpJnUKfRYPEzrW+p6b3rKPdXFjqcEidy6G/2zmV9TvLfVbDw+4dCQ7i+XcdJz6K9wHtZfR3Afq5j/li97G/O4CHKnW1e4T7cWWg8GSYexQb63xrNPWz9e8EHmN+i7Ah69/FkK4XuDHo4xDmiv3GRZ8NLaccoZAufQ8maedGPgb93MEnaSNbvfq4z6iI9kNhqWswwnkvwKVM9ZR+i3XU74VBlaUPdZzwXCpjz0W5AOVC01wqO6FexvPg/QKgn4D6oLfpADUe/drH7W3OtpPMm76FubgB7Qz0M6Of3oZQ02aCZKy6vo/YntfK881qKGAy7ml+Znm5ZxwPK0L77zGjme29Uxh4sAd9PNS8yTNv7y1i6fPtuX5vwPGG1JMBbM8JXKR/rY94m59VabdWYGNQ1XdqJ/Dzqu8HnSf6zkEjGRdrR+EwDmHeHIRy0feL3qLso03+Lolcyzb6x8kgHv8+2Dj4ewyPkDKb1Gmqp17g91zIt/hMUpf/+kn//Tn16XmgIs9A2TrqNr+8mQm9EbA9xoAmHsE7Wo/Etcin0PvfIU2WSCQSiUTiY8XLToJ8ELL7suJ9uL0jcw0C+MTzysV2JuNubNy53dYVKybjW4wg+btvUj6qkiC6YiUCqHYhQgDwqaNFW6WyHDMDu9EwSCCkCRhgtYic9LBF7rTg5mDfBAIjLq4Zb58w2exNAsgtHjHkWO37ctBMJnEgVfRvqsHYKISyA70R2GZiMNh859INO6kzL8YaFwYVgIrtGofr9bsRKVdGXSdpTychSsyw0Hbwmefzzfg3D4ImRj1IDS3GMPjtOAEFxUmJSYbHxu0gEAaREQ0mIQx4ECM33ESmXfcV3l2BNFiMJ4RTjAiJt6D1GShL/yoBwmcGm8Fb4+CT9gkrsQV5HiBzX8gNAG5gPlK3RkDXPg/jIHM8GK+EYRjbtcGAZyWOEI12AvpZyyqEvi3t3RgoMp5GuFm/9w2BBAVQ6XEviiJkoH8a73MSg7WfMMjD1QDG/N3Km37X95eQY+W6Hh1uXMt7YBAScV60u/hCMkZH/xfnQrgB2dgwyXsvXgsAleV9ewcnYoQItU4g9LtBGHItQGEZC9hcwSAPrR4034u7zBOy+WJ9U6W+ThYYuVhC/3XrQ3mOjYTgMBf8IbamxWdG+5Mj+VSunz2/tIR7hi7256pgIvyIw/uQtSpGysR/m2x+U/hu7wl9b4MJXZ+3Uhjk/z7ZA594B/j41yOJRCKRSCQ+IXg7s/dlwPdD4nBX/G0A/sInuC6JRCKRSCReT+R6JJFIJBKJlwQvuyfItwH4BiL6mcz8FwGAiD4XwJcC+C0fVQlFd8/M1R6QnbyOsXOsHgKzHkDYJVNvEsSdfguJCTumuJThsaEbe912r5dtWLadx6CdMO14xvLjd/MgiDuUTVzKsbF7I7Qyu1nTTlc758TDy4NYdrthYQSA7sTKl94IZVMPCN1tLObybvdh+Y2P0HVN3Mvbk+FaHndpWXd92bxo2hgIa1cM+5Ddb738orvhNNzK+4YptIcagJ3Ue2f0v4WL9G3sFJsnTBz60pbfVw8LHT66xIkwvAaszRKiApjb/ORRQ2NqmH5GDyFC7lkTb0EAWzhKnEMxDGaFbcAzEOc6b75pfxuFx70bQKQeNXfdj6OwDF1h4FA3Bw/pKqBd221zZr0Hw72XYjjUCGuyuvLwjughJGcZk3JgChlrT+XZ6HcSatHO+jyH/moQRxXvq4OmsCIPLQtzwcM1oPPU+mANPbEydP7SIbv40SMnenqYlwh1eAgKgaawI2oSslMuVva4tp1x5XVEPT57wPGEJAyFMIVZlOPGXLN3X4F7dR1PWYb5ro9+2gl87sC5ayEM7kB/r8wVIgbvOpiN0N5oN969UM+tMkIRzfPK5lQbbY3zpJ9x/U5hgoeVASj3BcXC6zrAJ3kPWEhOv9N6xHcGBy+4bj/RCL8rPHu52bvb5nId97ewR9oJtalHDgfPog6Q1xl+b65jLsT3en0u59bLmEz+T03o134eHjJMQAVGuFjio8HHvx5JJBKJRCLxCcHLToJ8M4B/EcC3EtHXQpZ0Xw/g/wHwuz7qUszNPrpUm5u6/Rd0PqgTOAol2iLX1pjmqq8LVKjLebkvTgwYxiJ9hIlYndZY7yne2xbPRerhxmPQYXBeRY0bDqSHaWG4L9CyKBcDgNw92l2yg8FDBeIKTwCfGf2pGoamv3Ev964PwRAN0hhSiH4a6WTnxWO++NeyYwiIGbzmzs6jD8pucfjhfExck4Qd6DiJcTePj+kB8AYPUZhCSUJde3RRP43fWe9N3ciWuS5mZHp4QjT4lq7wKRbqaXODq4yrG0Zex0EWRd2Y9T5GGNk1zretWik6NwehN/o3hvG0J1Y/mdvSh+zXmXG+vUWoD2a8Lw2NBAED1HjqI3tm2hNtYyUPkyjHuIeFUpEyeGWXAptqb+zvJXAhtCfSh/0soSn+PJWhHbOSlV6e/WAhXEp4eH+X0I+7GrIIxM+huiMPBNrDfIh9YXM8EBrePwfUeOeJY6kP7P1nVSwXgDV8zPqE+qi+aaBY2E40jqMe6jCk1YA3Uq8D21nmUD+VoYtyAP1U0e9C/xXgeKP79fV5ce2afmbEcD/rEzoklI6attlJkNFHRsj0ikCWkb5rRe/FutT7FEB9ThPRcxCDCnk/DTJLjq/PLFkYTcMULhePO4JWiP9uY2whlXfkz93UB/F9wTr3rVgdr3IA9R5KiC0PfCjPyBoPzdI+q/dIfPR4d9YjiUQikUgkXjheahKEmZ8R0d8L4LcD+L2QJdx3AviNzPzhj64QgC6ySGc1iK40P3TRPRnfheZFa4HrC8TrRYQPamQPw9JtPdMWYBE+jdoO0TtgNbp8d9Z0MIxEMOO6ahlQ47FL/ZxPsN1Mq/+yKzj6hwapwwQ0lp1+q6PqA3Bl4CQGnxEAfCrAQU4euA5JuK95GVi/rcJ+TlDpTi3tNBMQrMaktYGG8UBBo0TEDOG74EOXRA10YvS7Yc+5EaE7wqbd8NFkeGCEOaDThiuhNNbdY0ysBjUlMGiMf9RS8UIJLtwZDRg34IoRLkEoVq8VgzDseAcDeiKFiGa9CACbkVgYvxEPw7/sUA8EdnHQCST9J1oXBcRihBY1ysrBXp9ICnm7YQTPYkBiaK+wipQSax20T+ouBfSNRKyUpI0g9Ybocvz0IdGlOT3D0AMpo919G3MeBNeo8B33I9aNR
v1jW/S7nUtxDLUfp3HDqB8QfqfRTysRIsawCokW4HgCJ1tM+PjKa4bm642sK/sgRvxwVY8BGudRCxo44Z14+vB47p0s0rGWssj7h6tUikt87w3D/ip7j/XZQvhwJRebNQIk9p0Rnd7/HAieOE48yjDNDierbI7eIOrkJhjeXSxEKpbxtmddhGHnee3vbr2e3xp9am0Qb6bwj8kyF6KnhxN0oc6RBLZy6z7GphLQK6FEbanE2+JdWY8kEolEIpH4hOClJkEAgJl/GMCv/LgKMevL3crhC8NhoQfjOxIRvtgkkO06+7GxqASxh2LEc1rYvbvKEGCGcJf6UYdvKIonOU8CqJMtFMMlguG7Gs9GcrgXSiR2rP5GNqgbti+k+yiXixpHIQMBnzpwAo7zbHxfudETJFykYKkjzV4N/n3qwrELHAwaQHby7aR+1j+beCj087Krax1YQr0gYT5z2BHUEwgTXMRS7+fhLmrQ8InR6mJ8WAO63TMQAXY47PgzDWPTs9BYvYEhurmxGC+RQDFPEVwbptFwivWw67dngQCw/gwGlT0D/SThEzJfQ7YUva40gJldVLXdSQH7e1UUNHpCYR5P83KZwkxohFyBxGuDGg0xS5tzNg4gJ5t409Aoq9tlGODW78SjTv0MJ7DYvCPsGdHyy07X5ATjaqe/7GGcaZASTUM15FmyZtEYayMHnFib+6ifpG/7HXs2Ij71EYZRANq6hJx0EuHlyuNx1LY0JnTLVnUrHMIIPns2Y7aX8D7anpF7lIzncnj99A3+bJXw7mxPF8LA3guKSFoQ05JhR96zMeOUv+esD5V4KCoU6nPLxnqTceUN07tuVMCbMkic5eU9vKJYPNKO8H5n8UYpxxCRnd5r2t7oPbMSVXIP8t8jgQrAPWk8/XSR+RWPWx8OL7Qx16wNLZLliY+Id2U9kkgkEolE4oXjpSdB3g14Wk93+WZf4DqiFoadB8hi2eOw1eAyjQo1poksLei6ZQg/D4AbcVa4ud7LAjUQKlY2QQwQ3zXF0B8IWTpAgYBwTxNL+xjc/DsNWZB+y+MCgyjAMBQt/r5eCL0GV/dNMkF4FpCrPMQYVkaluX3WTCM+zMvm1uUxReViOHsYw4lHOMipD0NOPXzE08f6dfT1ld5IBXithVtK4yfToujQe1pWEsL1PCjaTsZsuCEYNhzIF+Jprl3t5gPgDkRCaMqGEcfWOjXupq9l1etzrS+tjlEXxeaj7UB7+62PSEJsLHtIe9pH9iQjndx1YGW8WPpLMyvtF7NcaYSfrUxShKX8NK+pQzqGdjEoLfxFdufJybJ+7sNINK8vrYv079w+121ooy1ugGvGINNSsX7wlL6njnLXxhxscpMppa8zFwzS+VTPDaV2PDk1lNLRe8FxFNQ6nrta5XcrrxRG0clQiFFKBzOh9SJOWHpvu6Y1kr95pOxmppEshUnSfjNwaQVUpTzPOlIZl0uVlMZbH/1/Pyara8ko8bnObWu3h89pJi1phGY4IYAboXs2qDjBMc8zK2sqQ0/tBOwk9Yzvwz48stiuDWQqF4CVdOhnnolT6oO8iOl9b6StBuncXLyJAAwC155fm5uQOYaDNM2uvrxP9hIP7QY8AxnV+d+Yfr+hP73xzk4kEolEIpF4yZEkCDA0PSK2Phk0V6ghfe2uRoHv7gcjjNRoVuFVcg8NXYBqelcQo+8F/RgMC527/Yl+JZIo9+h7GWk+rd56X78YGJ4hdm4jcAsaG2y7l8EItGb44j0s+AEwVBulkIs0lgaQhU880PAQCQbsZEyol4fvIEcDfSFZQJhTt+pufPSqiHobBDGEqACt8gibCalRaS+uMeDtLXxlQ3tY0i0SwSs02kYWKqIkFltqVVK7zr0DBpFgO76TJ0icSj4eYQdY2zeHtNjfoZJ2DDTpJsRjXnf7n02VJ32QhKwECBMQNRJsXkRDKoa0mEHrDYN7KJjRDADMBZ6e2iuDYbhCxoC5zGNUhEQx7wZL92nplM0YJlKCqBO4D8OWVQCXn8gnnaSDODxPTo4xQBp7NJEf6n1BVVihSSDZujcMppEX9jyV8FlqRzfyo4wYJD5JXzHTEJbVOjITeiu4P6q0sRXwfWCw7BlSnQrXq4gG9qaM1tbFS8Q8XcyjZi8i8LwIP0+6JzYk54ZCjNP5cDKlFAYHkqY1mfD9DSVDSNoOAL0VELGkcLWQnsWT7oqP7IRuxJi9S+O7Pb6H9YFim282Nl3ejaZB5ASpDbW+J93bg+D6KhHuUVIwkyw6Fk6A2DxwJin0bWWdk2MegeTfjXrq02VgyL8lkH8vuJE/C95lNv10XP25KIxtG248pTD2yti2JEESiUQikUi8ekgSxLwA4kIemI2vNaNGJ1kJu0iinljjNaEcMw6J3WAxI43KMNhaLcPwgezMUjDmSpGd2ojjqLLwD4YCAPRDF/dqZFDpk+FpC30Rkgx94Luh5PXgCvGkUIHMiTCKBlQHyqUMA3838UJfec/Geijj1sa9h5KsHvnByI4in5FQAAC0YXfQobovS8jF5AGhbuE+pNpPUT8ARB5S4nWJRJFtYgfNCAtD8TAJJV2srv6f7k6be77VKRIjwQ4an21qxjgWvIrcWLWTgn6MX8fhnFBgv9Mx97CM4LEEyLxnDKNfd+LpNIwqPso1oWGio8+qaL0YGXWjnR4eEI1eCiE0GgZkhOZxnieZ1ZdZ688mqEleloW/zCEnctxDF7SsW3oZ7Wnx8Y5l+jlRGBUAH3Md0aVPGrEYp0FHAjACTIvqcDJu6G3ocOxysumLrA5YKwk26ZfoWPYTrjR6ALh46yAM4bo6QBAhJUZ/sqEzsD/poEvw9CgMnNQLxELsInnr3hvaZ2Gu0S6xWuZZYWFoHtakgqkeQqbZpsYYyDwxLzHw8NqK4qv23nKB1EUvxMKcbjm3AfAwJdco2XgIl1pY1UmHwOoX36tGOm0MPjNwzMQrV0Y7hfnD0o/9KqaS3EOODvKsUvZ+ISV7GMCljuxh9u+EkVeJRCKRSCQSrxJyhZNIJBKJRCKRSCQSiUTitUB6gjBQ7kl3Bm1LVdztYbvGR3QNkF28fqI5CwyTa3BEMUP3VtBd8s6WBYE9fj3GzPsucSdxFYe4MIsGAoYnCDFqZdTawYVAndDUDR7Qnfc2XOb5Uq/CcWhjoIrHCG/qvu8eDstu7GnE3U9OG+atYm7YB7k3Cy4FdFm8BpadfN54aLI8BnNdB1T7YTlZxwkNoCA+SyEUoVyg+iKmw6DFnURIEVVDDRpN1CCpeOLqVREzr3iYkNWFgygrMMrTcrgMd/QpzIZlh7ub1w0gGiRl6Z8wTtRp0jOx+3jdrR0cPHhsC52u+xGYvWuknOGlYtdOz8RSTj+px5Dt/nfdbTdPGHuuLGPHPtcVGF45V4KUdr3p4ExeDKOM1ePHwrF8Wqu4qWcZOWSnvBbxdhgiteTHPXSrY3gfjMcJ3bJLedgQRihErJNN5Y21PPNEwAj9AtwDaQqHCqFPq2inCa6a2CexZsCJz9yNcJIokmkeN9ZeOydmK3LPCm8PuThxbVYxAp5J
WX0rU5psYhWljaLOdQxYzJ505YnSx+fkoRF+jxmWoripo5hXj3l+6GfMcMOjrDU9udQ3fPI8HtpMnydlh2TJCuNFHeD74JWDMX+ZRgYYEKHdYUofbR5tPfzr7eVY2/W9QeoBYnPdNaOirpM92xj92TcWb6BbwriJRCKRSCQSLzmSBFF3aI4L7wqwxoJ7Ng49F5AFb7nQWBxb3PwaF14gpMSGK+PdXJSh4okMuT8HF2cKoSWW7aAF43o/dwmv0RAEvtRJ+NTFGQHRACnBuCaIrkntKKcO3jRbhBu22iYjYsyQ63CdBXQlULrGnldGOXVxdQeAO5LY9G4EC836JcSgU0eJYUihn40cqlt3AkbIoaGbwq1oHbSN0a083uqtOsIHKoNVbwUn7YONUWpD26tobFgdjgJmuCbClXBnaIv3OzDCjqLIroUoAZNoLQiiwVAka0fZOkrQlyEgkGRibfUY+rSKZeo9euzrbkQLPERq0t2IQxC1PADQQ1EyMLQzhiJ1mow5+Q0oz1UHQg37VevEIAYdjzAgq56J/modo2hsObBksiAnAFxfJfZJ1VANTXkqmfYSI+4AACAASURBVIzY21juSTOGAETXWYi4Ykpl2k+YQIwRhkHDeBZdCC3jGAQOE1AKXZMc/vxhhFFZPcw453EcNOZqP2s/bxoaBKA9XdJOx/eckSlLqIeEg1hYCU0EGlfNMlKUPNQ5FUlCGxsjPupljCOT9FN9mMkFskxCfdTJiLC+4YqE8P4q8FS23gTVOelGdAWyqjQMwsT6TTPDFLqeoLfmrI1rFKIWsnj0IVeW1LiAp+qOKY4BJQZjGus49mQhT8D21vy8WIYi78NQR0tpK3OdnFDxvg4ivd4oI8P66JvSZWI9Fu6TSCQSiUQi8TIjSZBoXIVdNBf5vKEj0U3ewAwStl2+xZg3NFIhyXAKi6GBTtPC3LJRAGrYNVv0ktcNUI7jjoBaRVvBsnW4sWLETCBVgmeIHeNCYgySeaXovQsNcUUznI3I8R1J8gU+yAxZHobnSUiacm6wbBKdwqrd+k5j8p0oCWAMMoSKGvZGJESPkKqeLYWxncUC3jQdKBFwebNO1qYJ/jVrHzQDBi1EDzEI5IKzTOphM+WjDBXuGGRDPGaeF0q+uDaMernU2od3SOQuOk2aMKTjZETQtnW0U0EpfWS00FtZu0phHEcVkcmuGTOYRkYNq+LilWR9sD8/Sb+biG7oc+nExVKsLHP+ofg4rQKcbDo51kdV9HLqqbtTC6tWQewT+74fJFlT7IdDCDbL4BIz8gAY3kZKHJato5Yh6nHswjgcD1XK2ReiqMo9XAhT5zqFuVzfKv6Mu8G8vDssY05M+zp7s2hqVnuO4tzSTDPU4CTU9LzddXmv3A0Xi7o1VHsX8BhTonns5Ti5vtBxqUNU1QYEOk6nDhT2ucsmJKpj1pVka0qCUQ9ecySEU7mQk18AUO+1XuF9y0UM+B6zXkUyxPsrzC0TPjVSUd+7/q44ioyjZfrRsUTlmUC2uWkkc0wfG+brdh597UQtA7Uy9l3eOYelGjb9mQJ/RhD0gebBMN0ZkvTB4dGz8bdsMFErZpDmRnwou1JYPLQ49LPPu+5ec5ZyevU2SiQSiUQikXiVkCQIxH3fxBLtu4HPcKG/4SnCc7pMMxIeWTBSNNbtT/MSIUaHZH4xQkWOy6KVihAS0dUZVp2DgMNEG3VnMghF+onWFgp18RAPKYMLo29lXugb4cG2mznc3q1dvsPJg6ixNvatoN8x2tMG80wRIy6SIPbfSM87dkTNEKwSTVR5DvHQsYCmFrVxYU/nKcZerR1nNVaMQDAS4bhU9L0OoU4rfiUxAqjRLKIbB0XT6F6F7JhFsQlZMRmhrN4twfKI6Ue9PoTJ8ASA3rVvo2u8uktYVgiikfq0FPY0qRZaxUxOwki6U3KyBQC2raH3gt6HJwuIHyVdpF4F7Vy9Dy2ci9XwrIWxqXCqEThEjK10NKbxmGgdOg9CpJJkDJm8YTo50WMpW2vIbmJ9UYuQH3Ze07lS3pBzL8cGZmDf60RGnU4tZJiRttcgUswAHh5O6vVEMtU7uXcP9PvxUCeiaJ4jAJ26eD4Rex/HcWQeYXLWLqvnG08e0HvB3enA5ahgJuytTmLKlRhHLzjVJvUMfRhndLsr2J8ExtXryNffidCDFxxpXfsbElNUiHH3ZPfx3I+KY68ohXHSFLqXhzGBS5Ek1KV0HVOgVHt+tUrRM4LgmU0s5e+m7bNUv4beC/a9qneXEomnNmVG6Z08rfCT04HWCUerPldsLAoxzlu7IpTs+ZH+BfZWcRx1ylB0Oh3+rO37qtas5ehcujzU67bb+3T9dyd6bqiXomX5KZUnsmpWU5b6HkHAFkcZZFAikUgkEonEK4QkQYCxo+i7nfK92Q4j6WcIs2BiWSRqytSrNA6MYQjrzvTkXl9U9V939Cguau0etpMdNR7WGPhG7j1iu9PyJTQOcEIhpvQ0zxDaISvsxrMlRJhICqn3cILgwqKZUMdv1DD0AXZC2QltH7uP3j9aJW9HuK3tULpLvNpdbhqHPkLXKnepPzPhWHewLRuFhu3wfR3ZWXZCtcwLtrMa54LdynaNyXar57pEjYEr4xbSv0xi+DIAbpZOI8wVG5o4V2yu6d9tYYp6lUd4j8apZyEx4xkjrEl3ytc5WzY1ugElS0YI0vl8oNbBfpmR14NRCAyvGr/+TsuuwzIzvZj4W2uSFckN8uCNMlJ5Do8dUkM3llM0y9K2Nb//VvpkoFp9W5d79RBadXc+UACc1NjeasdWQ8pQrdrdJv3QtK7RMH7jvKMzoRnpoUb4offoveAw76TC6E0MdA9nUu8c1muhZBBRMPALo1Z43Xsg2549v0PvhGfPz+Be0BuhPwQPqCr35UbizXGLxwspu20cGJiIHLa03Dbl41zS9yRtHacn5pHV8OQkf9fS8Z4nD1PftV6Apw8+3lUJu1o69lbROuFkaXPNc6cVHL1oHxQnC1uTDCmH/w7vO/vctqak3UysbEoWddOBUaLDwvVKmEs2/vf7hqJ9ZPOLWf6utYM53DP0s5Bx+nxtMeYS3g9FicGjlWmeWT/EsV9TQZu3FyDPiJGUnYqfb63hriQ89TlMj9vQkEokEolEIpF4hfDakyDEQNHYddfjuOjOOgFsK33z+gBmX2FP/4pBejBG+It9V00F88TgyqJLoAYrEw+hQb1mGPFS0ej2jCreGeYS7+J+SjhEt+bhMs1DpNHCE4h9dz66va/hC9YTACZvDKY+xcLTpbiOCim5UHYa5ccSC8Q7oOpuuXlBhDAJrjxftrjDo2F45fQyp62Mrt91iHleCXESuxdN4HvskHvCcNVIFwKoRUJMx9vsjgZM6S6tb5jkJkFvwnUv9B7WrNhHToYxRPtgKXoIOQ7xxen+1ijSUIvCs3cSxPuJbX50oAXSr72niDFeuhuTFjrhf6v3yvAUCbdn1YbZyyB74nzoJAK64Vh3AnCMoWlpMAGHpcO11Koa0lU29XipI3ToVh0njRcAl7PkK5Vdc/FCKOqlAAz
yxjwa9r3O+jQAzndKkDRyw7pHooKXe1t7LyNspNvYhP45LJTEQogsbewanmZkY5PraSecAinpIq0MT70MzPOdK485Z+dEsrIR6k5wYVgS4pMHj+PaLpf3ygN1qYzn253Pi+18SBiX6vn0XrAZSRLIsdWwj7+1VtD3IiFRB82Es717tX5T6NXGTgSat4ZpG5Hd2whfnw/XRIDVw4VDGePfAiO04z1PEoIVyy+BZJsLn8kL8cSiiXTsfdRxrrPNVxptYyG3+AjhY8DQBrL3fxRgroxybsMTLZFIJBKJROIVwmtPgqAD23NbOOpvLDv9Rjp4fL8e9jUxyfVRad+PsxoDSkKYWGPx+G9Cu+NJM2DKKMJmzPAwWBgTCUAHRnaCTqj36tUBJR6U/PB4+jqMZFYhyn4WEsOz21j5q2ZD6C/LmOELZ/c+IfCp+044Gnl2FRNYvCJaTAzTbhUNnlgfQzgXnUZWmEZO/Njxfifn0h5Ebq2qppOgIpK8aZ+Y8RcJrE5A55tjsHpq+PnhuJfTZHy8T+ywtx1uvHGNx4NRvExC141ZhTxjeIKJYRKhH1Lqatfxvc6XQJCY11Cz7BLW/4CP6SgATmKsoUINQvDUBzG6nLQJ9aMWiivQjEeYMoiMto253M9aV5vj2l/dSKBF9HNodtCsA/FW9cw9YCEeOIRf0SFj9uzpGWCgqGhxCXoOl7vxojAPJizdRCw6F05YEU/z1t8dwQNq0lLRtnqWoRb6cQ9tLaPvjOQdBCgGYWeV0npSt4qPvohzZfJSciIvlK3vTDChPYRzw63a+Q4PlQfB2Gn0XRgTI3MmTRIlX+ggbHZ9ID5JdVs8tNEETO15rzZ/5JkXPaah9SKFLGOhYxD7i7Qf6xGe5Rvvf/93oPIkpksMHPH9Gp5t62Ob+vdP+pizhtgn9lPQIrIyqA1x75X4RNfXgoUzBgHafmL0pwXYl2sSiUQikUgkXgGsJmYikUgkEolEIpFIJBKJxCuJ9ATB2AkrMQWF7aqqt8eaKnDaeafwG8aOoaVu7LrjR3G37wBq3OEHpnSa4/fgdRHA5oa+6UEGtmfkKRKjy7tnXwleJ1wgO5O9+C46FRpeDYsuhffLuht57SkOmPv+qYvrv2YV4R48Tgw1eHvorusIC6GxHRpuOfowhFDYrnsZ3jX9aQMK0DXdcdM0qnzioY1SWOpbRC9hFTSdBD+t7Tfc4/1403TLse9sxxo0skxEpxUKngO6wzt5Bekc9K82tl4CDY+BKGxrKMt9FjkI37wPoQSku8gAQB+uI5yKgDUl7shAZJ5Lep3vkEubfW7eSLspHkuL5wPFdobfrE1W8XCPkVZ38UKIIWF6uIdOshApYvOgoCk5DLp4ffRnMucsDWx8L7AJGEOeNUtl2kPaa0C9R9xDYPTzEL/hyVPDPDo884ddr/3lj+QJ/ty7J0lhlHPsaGB57UzPd2nsGkaTh1GYT/3Enp5XPDmWR4PFE60+H/MgevRs6ikifSblbKvnEq7fuX68By+Z2H4gjJ+UXRjyPK4v0EIuiO0hPbc8QUwnKIQPWcieewCqx14MvwMBsDTOJIWObCxyLoeQNeLgqRH1lTrQz2VuZwyN9GdwbiKPU6cwzFW7aoTSYXre+gnolzqniE8kEolEIpF4RZAkCGEIiwZjvJiKPgGe1dXWj5rlYl0gxkV00Hh0o7VvEO0CwOP03e7p5PcEgHbmoRFi8ffB2C9KSNAFrpcQjRGumA1FGmUBYqTQIcZfO5OHQrjgpzdq7qubLthxMR5TdloMPGlBZAa2nqApP40UIRbCpFwWoxCYjO5Yt5g6s7tOxHCtp1MHb2r1FbFstq1fxbpPmhY80sdS7WbDoO9FNAKC27ml4HSyyXU1omWmxy3chulaMyQSTQTwKViAFvajc5DN6IIa3kVZjYpBMESxymjNUzC6I8/iOirh+kgAANBkwWJkxuqdaIQQEV2VRdpdRvKxjsWUGrawpmoOdWoLYRb7yZ7HMK+pk2Q6MqNuJe0KO0EAANgGGdaLEjU7QNqG+DxhAzrYCQwpT9uEYehfSSis3wEX3vXwA3//yAPOcQ4zBTIJY17REsIV+2TjqY7OP3C4/hEej5QwBFiNcHJixfqw32la2jVTjJZLmmK4PpALCce5QA2SIlcN976xhxVNmj6Yp3D8jdVgF5IJk46Q11fJP+JRpoXLeKE298K8mEKRYr/G+9u7tWCEyoW4yRiqZOFeHmam5cb3PYBBfoT6C6Gk73PXew7Pv/25jqf1LUEzj81iz9IknkJ41lS9Zb8xnxOJRCKRSCReAbz2JAiTLqKXBWIzY57mc/2ElQSh5TgJ0TEbemNXmE4YC/YKUONJX6CfWI1GnsQzJ1KAWXbXGwAiiX834cpgnEVj0b0s1DAgjdsXjRD1YsA16WL1v0oNa7vfaky4qCMA5jIbgQQw8SBBWIzWEoQMYww709CuoODh4uNRgX4e917Hix6qiAGSGNhU2LMduLDgIeKMaDRrmNiO6OYMAOihTDvAXh/tX9FpmA35qe1m2EahBe8L+3kx/CM54r+FexCPco1gif0FgKkPsUiCCOGW+R5sKaKtWsGQpqZkjwlyAq5zY3/HrESud2E6DNpMMZz1s/LwGLL+q32IhrIIOlo9KbTX+2USGZU68mkcs/kUMZF0CHVUb6CmXktRt8Wuo05DRJmu21324JFj/Ra+e/+XQNZNY6ntNa8kJk2TqscXrycQRHTTSRD5LBuj27uEgLqNhnia45UgCuhGuBnpEsmmKplfSh2peS2DjPdVI3Ar2C9lzMk4pxuBLsU9l7gyyoOKwx6D9Jmes3C9a7eo1xefQlas9ZMxCacayTMRXEZoLBpI07suEjPWJzb+sX8M5lVmc7APsWopfBYnvuKSgvcQKbk6iKjxnveLV7Kwyrt2JVcpkmA2xjZfL3RN9rz2K4REIpFIJBKvIl77JY6vEUOaVxM79fAR23XbxnHquntshk0dYRi2Ji0XcvE5czmOC3w3Ck8MdPU0CEboWNgjhGSExT0T2DOd8ELEiPFR9tBGNZoAyf7hbv0Y10X3/6mDjAS5EZ7gu6mbZmBxg2McGymGCW4ykZzPGDvvIEbfzNBmN3JpcCtet34OBEAkP9QALLqo56KGEsmucwdGFp0jDBjT1U40b0VCCg41gIl999/vF3aNpU7LOADDYDKj0NplBucR+iA0xneUF7HSlYihTsPAN8PJuoZmYkTODZffEMD0cAfovO86vir26C70dn0Jttgh9fB+Cn0AMzY39mwcfBQlMYqHTjmMpFByiQrm8KQYSqJCu7R1IUgOEi8g4DoEZ7U6i3gNYcMgFQCff5YVpzch1YxQq9q2zoNcoCppaDuTpyY2kJI9IHiK1BVUxEJfUxD3WBZDMqMQj9TBOmC9QbKWWNMX4V6be9xpaitI2kTbIJ14qSIZicZ6TJ9/Pw4IUVIbcG6SVpZ4SlHLTGiamaRrPE+7GOuoz4P1cciM5P3ANGVsKQX+vWha2hjG1nsZmYw0a0qPnlLhntZGEDvRw6xz1Lo+9GEkE52oK8NbzD
3FADQrw9ILL15KMQU7nYRoArGkUOZBhln9/LdAcMY2TFlteIy5/IHRr2WkbJ5IkK0Pb75EIpFIJBKJVwivPQkSyYnodUDTppga0zFWPSwixa2b5kW0GYrAlNHA49U3iJFWxGAyjQ+/407DayPuKMddYKtawTDiPRRB9EHKw/Cm4Dr0SYy4aWaYxt3q2OyKybtk3XS82kyOdTADX125r0y+ose6Luj1N7ZUlYRgyYwbeR+ero/F45OmwUPUiiBE3QGuarAbGdDCbvQu/Vsveg0TSsUMsytMg0V3t+3gcJsHQLOXgdRvZLSgA5MeR9yNFg8LmnRP5IDtbNPkhXBFZAWigwOr5HoCcc5PRFAgBQiDyAk2uoe4dBpkR6xiI0Db1nsBLoAwKrpDbs9L8EiJYSE+xyucVET4ZNOWIQYjGNRTBcc9/FrrZ30OxbtAUu1S6HvWEKl6bpJ6tzBqSKFb1PBujVAKo5MQFL3V6YEhiKEe4bxmL8JRHNW9KajwbU0aJk+t631gNwCm0Ia+h4GKjB3xNA9ADG5CAk16FcQjJawRAtFDJHocBM+PsnU0SPmWrado35XC6N2iSQhFw7+cgCjy/XRq14SG/01KBJB7dvVeJ/LBUh47QRFT3hq53YJH0TQwSjwZmWVdEt/D6mkWh8eJJgubIwihsbXHx6PEesHnFxGjlz4ROYOgYh2HcF2YazFUzypMC4ltaYABoJzaOE6M8/kY/ZZIJBKJRCLxCuG1J0HEO0CNqMXV33U7ArEABCN8G4tf7hjpBG13NIpH6pqYTO/iYutfGkQGMDMMeuEQrRsu1D3s+kudScUntXzVN5jTwgJFNT/M6I/lXKdZ1bbF3f9YPQ6ED8S4p7YYmXY9IaQdZu9nrOda32G0GQiL92DDoQDlfpBF4n0wYt9dr8HIhSaeMVOIk3pomIitXDhi5W0sW/QOoOthsrIshCnqywxiglzXYYp4ieSX7uyO+wfSg+3cMfAuqGhjrEQARaNN+4pLEIKc6q3zOHh/xPqZgKgTKGUp22oU+iV6PaEXTUUqv5mHjusRBI0Tf8YYk0aNkCwjbbWHhRgPUIOuw25WcmzkKJdiWIKP67iPzAWWUKjgsdMri+6MYetiBEPrsuk7pIRPphBmYXO5eH04jB0sLMsIUzWkr+aazad9HgfXklif0xjCE4mvykv4Vjgv1Jej0a8GPgUSZArNCO+9XsoVQenOLLFP7OUINchJSSQWYshua/1lfWb6PWgkHkQI757gLXblzWD9QOrp0QguWBw6gLUcsufRKhEJSHvHO9mgfUI8SMLCEl5p/eS6LvJe4yY+PO75QzJ/WuXhpdJHvbwcfVcIsyh9OE2V4IUSvUT8u9aFm4wBa6gTIO/1/bLdJocSiUQikUgkXnK89iQIKuP4VLFCfQHdgPJQhreAnkpHMBZ5GI1GUkzZYdgMbLtYjA0jVupFDXIWosIMlEmY1BbW/hlE/dQQde+DG2tV4lAvBkoLxEDTYxtplgpZCEdblLos5DuC0R4X+/p9DaGZzmU4QSP1VuIndOwqkImwi80h5GjVN+FDPV3aKBsg957gTcNl1AuDTHQ1dnE0nrfJhND7ByPRXNaj4co0G9u0fFrMvRq2smkb7lPEmJ3Ji0HC+NjoeWWnIa5qfd150CIEDaEJxjOZcc8+7rcytEx1t7HDsFOdEFivpTB3jMRpoZt7mG+NQJ09m4td3rdBhHAQno11WOceMVz1kzuNXo1uXLeeCyMVMdpkz8eUzaOWq7Y6GaUeRHRcH0PgRUSgWNsVSAi/Z+jn2G9OtFa66osYemRj7m3toX1m6Aej2zqOVEMFG+a+muayGd1XXSjViYKsC2R8yXVFrt4J8dkvI/RvIg95cEzjpnCCbxBu5MTW6qXGnYcwMkL7bI6bto0RCrGMQGJH0m7ywrp+ZUgZwYXC2ujzwjxnbGj9+QrX7xp2dRDWd8qqazNpMT0y76+0cOzcLhUxPSMuSjqxOOitJFYikUgkEonEq4BHlreJRCKRSCQSiUQikUgkEq8W0hOkAvRUtnNjZop24imzBBeewi5IdTyAsUO4pgaNgpEIIQ2A7CbWStNO8pVHB4Wd0QLXi7B7lAPouitJpGE9ttu4wV33i2qRuMeJ1pUOoHYpgytNYpu6J4raMWkjWF20m6YsMhTFBvWEqM8BXO/UogBdRUttBz7uvkZPGNPk8F3jGoRNAXjKR2vkxrobz8BTEUx0QcRV2HARR4THyXfRMDChRhIBQdNI6Kt7vvWfTSXbYe66q2/3CN4lfBS5H6k7e4grIRp17I3QbWv8RqgHwg5z1LuY7nkrRaqNXZjvUzhF8GaJO+IrTOvEPBkmCYtNd9C7titUwD0mABDzyLjBN+oTPCd4+Z3sU8+Zs4vMdeVHfo/hZDFMyb2JtOzS5Lkp5rFhzyqNttj85lvCCja+naZnniY9mbm99nyxaemYN0H0oggeBX5/LOVFT5+4219Gnaa+WtRRTdDZy7HxsvILjWvU02JkuIGHzVl7+wlzGIvWaZpnoQpTxp/oNeRt5tnrptHs3RHLiaKx8XY0vH2uxKAjHnOUmDxV5ER/HgrNIUQ3IILV5CF0HMeZ5np7aFIcy/gumLyPwvPPoS81M5YXewStnkQikUgkEolXDEmCdIAf6ojfB3wBOTKPsMTPn8Zxy2gBqF3RhllnmTncXZ8ABmmWEs2IsREOluwsUyaPxTh1IqWyiKW6joKQG2aIWPpdJ0FOjH7HfpwuEkrhNv49oTzAtRssdCcSOdSUeFHBTS/bjIJtMTaicQ/A0kcMTRMMIye0kRoNnQPGMBBiLD6NujkBQazpjXkYwC0Ypru4ybenalhXBp3lYBQZ9AweBy3shRm3qp1w6L1j2lKLu49G+9anTA580OyGvxpVWx/pRqMobKgnOklZm5Ayq0iih1sFvQCOJIKJW1rdo14ASx+Q6jCAgtGqxycR1DV8oEt/OUmo89G1WVRjg8+Mm+l/A+klpIAa4zHUKPS3zScAQ7cnasuMU2fYsYLrUA6WsC/PQtNEu8TnfB2pqqlLdiUUoHm4mxmq4b0xGcFzpVzKIYYpkEZahTIjqA+9G0/VXIJxXEO/AeD1eQz9EMORxg20PCUJKfS5n2LvtIkku3ETJc64BFIL8i5hYnAJ5d8gGSbSK3x3YV47XO23yG4gECCY57rdj5dy1iaQvRNHH85k1RhHrIRGfL6WdkzH18bGeaDaQjixp152DaptGddIaoU5cD3H52eemMaxG2F+8TlLJBKJRCKReJXw2pMgtBNOf20bu6gwozd+xxARlKskDl3JC666UNXFfFEhwrLr2Sy7nVyAplYrnxjYukgaxF27y7AILE4bhCEkGDKv+I6t/VYxjMwC4CTb8rR19EsRLwi9pj0vqA+Eek+gHbpTz25AxR11X2NH4xHapjoMs6gJAajxtw1tir5hysIzGQnB02AYRTy0EdZFv5bX77rsirOMZXkgbKYPcAD1QXZV21HFU+bcF4MkZH25qGUaPDG4Vr8fNd0djTByxzqAgH4eY8iVUR4CuWD6ANFYV
48VYlKjeBByTpCpFwCZRohXP9SX5mPRwGfrb0DFN0M7rN/NYMRUBa+vj301Q1v7SEkR1rFjFbr0FLnVtFl41l2JhMihQphTtYaxN4w6HqlZAVAdYx3JMi5LXyyE2pVxZ+2zOU4LCVKEhLDnYoiHjs/h8RXIiINmYtHINfOQqHM/e5abG8bzRG7YvSn0Y7FxWIxfL0A/tX+cgFwyRsEeg0csYNpp9saYDmrbNWUzl9nD7CZMRNZghEcdQr4rhwAs/R3Eh2fyYfEowZjHXMZzPgoMP9F8/fQOsrlm8/4GIkGyetfcnENRMySkFW+W5tnOW0Wx+UYZDfO8cGJ+ee5J77dBGmLkTmXRcrpS5U0kEolEIpF4+ZEkSAPuPkiLkU1oZwyjQkUrXVTUdsjUoO9bEKyEiJ5yEQPczu+b/Fc0nKE9YfQnLJ4hm6bkLPBUkszQtIy6qCcGnWLeUlmgsmeUoDmDQAewh8wKBNB5eCh0AHwm9JMQIVakExo0iBvLnuJZdOwWd90X6vKDEUNmxYT6bMM4vNqNtHCLWzvTZljTsvAHBtGju5h8EjFLSyVTdiEtykEoH1Ly50xDHDTcg6ucz2QCn1KGE0HdSJBwnfWZh0GQewOY8dA3jPt53cdkM4LMfrYdX/Oq8Gw2LIPSK648GdawrWGQ64+xX4vWZyGCJgFNMyLtq4V3aDjPRI6EJvnuNVgIuTWMqhMsHbLf12DZPXS+c+HZYIvdFry2YvpedDHo2fqhwMcBLB4wkwdEHBP1fOhnndNbRzvT1If2HEWRy6l6nslozHM+yuMZNhjDoNXryAinFtRVcfXn/ENkIVt5/ALLWEKQODim6zKtjRQKaI4ExAAAIABJREFUCKQRM0AgMAa5GVM+y1EAlgVonYtLlf17CW3Qe7IJ3K7kQ6imZdNxEiR6XYVmT9dHL45IiGmoFoPm597eq8HDJ5IYV/M0VJCNKYkwItjvq/2k7033vGMebYvEjoX03bzvQupYO9qNvrS6lEG2j/cKS3afRCKRSCQSiVcQrz0J4q7vbfn5wAgPIYiRHTKrxB3caoa0LuSpC4ngniC6Ax09Ceo9od0x+rlIGMuZnQDxOnQC2ljA9id9Nt4rO/FBTTMkqAFQjpFFhE8FfLId03EP3jraGwSm4q7+PSzYuQD9LLv4vttaQhlVUky610Qr0gcxLl29AhCM9gi2/8X1dsgkAwBUJVyEiGeD0gxbM7RODC4dx52meTwKyr0QVBb+Ux4kBMkILYv572cexj4wiJxGKA3ukbJ6s9jf5hhBALCPhhYNM3LywW4QmiGeC+J9JNza7D1wFR7R3dQUY9PSz7IajoGYAcJ9jaC5YSROWYkwn+MkixqMTvx4qIi237ylHsvoYalMeSnX7hfn9lqG/cYAyg1PBDVYOWhlzOFZ0ulOrtgY2HQnFoO1hOti2IFpxhQGbTyRK4BOF5sI0UgNz4eVTYQRzlR4eBJQIECC/tBVuIU1JxKeAMBlCkmaPiH95iSmPZOrXklfjHrt98n76LS8pwIJ4rowSoKQjUs8fzXGKXYnWVNAmiUphtz599APxORZgm5l85nufWDUlTATIVaujYkSY6yV5pVfWr1I4pyI7z77KWpHMU0RmAA8DbiRRoTgCQiMsYWMx5V2S2Hx3rB+aYPscW+W6LXGgRA+YSZBzP0ms8MkEolEIpF4BfHakyBcgf098HS1BlvDymJc/wsLbTe2MBatRpJ0W3TadT0ImBpJcQ/U55qetpCX186BLGAhQkiN8H4XPFZI7xeMnRiqIbH4+r0R+gGACN0MGKsvQbQa4q6jlbExsHUhO+Lue9iRNFHQyaixhXRRkqayGHW6KL/SLCDIfawMN+50d7IFj5a4I9pIU0lqd1Wpb3lj5DZubxS0+4peK7YihllHGKuT6Iq0OyGivA8CCVF2MWydCOrDwCg7xm6xDc1ioDfVZqFD+z0Y+T586jkhniF8PQ7REyZqNDRCv4yQHu3gqR/JyAObz0tYEzDmuf1tcxcYO/pOVKwkBvOkGSMdG4xpDQmhBtEeMX7ghuePa4lEQsQ+zbA7tE4IBJE+ZyAlOhopOzmOe71MyHZqc2hwo0F6hH72PjxukxJMo92WttRDxIChkeHjOBvoDKlbMW8G98Ka3wneF+E+3ge3SBBrh6UALgCj+PMZQ+AQxUJtTCw8AxAS6NTnvgnkZG/FidmuYWAe1qflRQ2k8cyHjrBnY2MhRaIuRyTfIqEV5i4vpJNUTJ+Fek1SOXFncyl4rHm1Ks+EgHvV8NDSiG0pAOx9zCTvMiMlCklY1EK8Xzk+tVFvI+vld5IyArFBbM/teAdMnl3ef6PjqY8QMVbdkUi4GTGeSCQSiUQi8aohSZCN8fD+PsIcoIbIBYGIwORGb4Zq38Iau4o3AQBYJhfLyuLGoxkAUK2Ky2JEM9DPYyfUdt2NgOnHnOWAq9alDqPe+YM7oG8hI42HQQyCwYwT3x2uI+SGY6YXIzv2Mi2My4WGcQgz2IIxvDEstKE+L2K4mrEK+MJcstOIOK1pJYwBGkbr6kXi4pUq0MoV6HcFXdtYnhyodw29MNrG6G+IZ4h49egO7ZnBWwdOjPpkpM8Zwqdq2BGDKqPWDmYSfRUA+0MdRrMZlMG4oYOcXKGmdd14GFpFjaPgpUOFUXQcSukoRbwHShn3NtHT1gr6pYon/7pru2aCuWFo3jy+CHv259XnrhtRBeJ5BAxvmE7D8F+1X2gYsF2NLa8OLeetRn88Rz2jykGDFLNTGMCOsaMeMi+t4qTERnzoT3X0Cdl4Ru0W3TWX624b8ZMmjhExoR+mZ9pI0lCtEbpg/QAlSPnqXh4OVxayZvGUmMQt7VkC0FWHYyIk7Pop7EmMaY6ReJ0GKao6NS7oe9IwGwbwBGD722xzYslwFIx7bxOwEBQ8xiHCNFciMRs8ayyLk5fPGETqXgbRE7R0ItFSTk3mkJZRiFFqRzuMJQFIn0tA2xjel+Zcc+zihuKCxE5QqfjvER7ClQ0r14SWeyLuGJot5qFy6xlHmGv2b1nwJpmIfX9nLe+QpbxEIpFIJBKJVwEZ9JtIJBKJRCKRSCQSiUTitcBr7wmCwuBP2dE0O4WBHop7Ocgu8nCHtrCIqzh5Ey4lAI3QbHe+6w4cwcsou6ap3UkFPK93cS2Ehgk+UuahUDpQbKe7qtDqadSpPRXvBjo32aVUTw5vnwlRmqt6YQ1JCe7jR5XUug9FYvxVaNTSkrpWBkE8U7Yh3AnAU/ZyldCfss+7j9Bu9fMXXQo71zPEBJ0Lu5a6eM+4g8uJ0J6rl8TTiva0ST/cNdDTA/1N9erQXdxSGUXjnWoVLYDeCdxHCA7VJucAqFuX3WzbUd9G+hpWocmoW9L3IroPRxH9ko2xnRuK7pxbWZ4ilySVba3mCTLuZcdrmHdEjF76pDPh3cPzdVY/guxk+3m9+D1Me4XKmIzHWdrMrYBqH6EPDzJBaS/iodFpSuPs5W/iNcWnrh4vOriLPoGFC7gHQISL32q/3sjY4SmYtYhJvHLSksHwAAmhF67fUIMXi10WUog+
qtfC4VytG2H87t4ftjvflzoG5wTXmAm79/5b1H+Jry2fQ8PLxbKreLvV4YF2OXEK1zGnEOJRd/PCiiFe90G8ghi9YAjUutcKo9w19coYl4IYtUgDCGNemudGV3Vm1nrZHI6eHVTGtZ1FHYdDJ/pQmJdIGBM6N5nHQQDUyzVPjhKeAcA9sey7PyOk7wqtpzXTzqu1ozOh6oQxLz9AvEu6eZB4JfVPE0pdszi59hJdZ3iyU6KWTOPhUWLzJoood/UuMe+78G5mC2+7vkUikUgkEonES4/XngQhAk5PjnlBCqBvVdyXLQwm+sxskkGCTEeAgFqHCzapkb1tDcyE4yjYn5+GQQGgdUJ7QzQtyg7XAYi6HpN+hxlOinIhJ0HaE8blfQ24i7oaAJ06TncHmDVFrIVeAECrbtxQl09fXEMy3JSLCIrWB1kcFxUVnAX+1GhcM65ASI1+koU2tdl49HZoatNyBNf+EC5DjT3e3TROJl0UDfMhks96Aba31Eg/EY43CO0po72nA08PlPMgNAAxXlor6HvBbmRRC8KbLMRS03rtpO2zMk6MchZjr5hBp3/LXFDiyjI/VCUUjGtq1YmQrmEG/Sjw6KxO4EuZQiCi677VpWxKTkQX/2DcxowlVHkyoMydn/S63gkUJvx2PrQdbSKKupJ+7aEKwbarHgSCACcgIUeVgXOXsAmr20Roqf7DpLuA+e/Kmm4XaE+ujUcPLTKdiFAGW9iHzS8XddUT7zpo62548o0UsLzRLBjLNBGh0z3Nto6ioTGkTnUzJtAcvmZlDsNW59A+srpQC91kdSuQMBrCyC5k97R6WPFhLou4LYCd5kd5IbXKXjyEz95LFqrRzuzvq/amhFH1NUtQCXPYQk6MQGaM0BW7jEN7LYxKmSIPQ4njZeO/anho30zkVdDRuEp1G8mj+Dwh/L7ON2ufiUgz0E8dVGcyiCFECBclcBaiyIkshod32b8vRrRyp6s6TVmFtG+cTOkQwjJ0CRcGn9jnphNu1QSxkUgkEolEIvHK4bUnQSyrQyEWsUBA4r/VGGXVB6BtWAJlYxTdzbyKC4eUdz4feHrecaoN9/uGn+ii3eDGFUHSxpaOdsLYib8EAyVkpzEBRN/wvGPRRSCgv9FRPmVHKYx20VXrsw28F1x2NaCPMgl60i4Lb1KjlXbJglIe9HgD6g4lZoYNwXV4qGhTh46HnaupJushBldneIpgAJOAZmmQTDxGgIRFvyzAjdAY9/Hde1rKIiFVTN+lXoDtOaE9IezPCe1pQXuzO6EByKK/PBScH5SIYqlPJJxM58SInCjO2J4w2t2GfpYdcTOYmhl+MS0wAcxFbm3jfYghTUq8dDKySY4XFQE1wgkkWi9Ry4J1J96N25CyeLRhGIHHWYkIm0uNgCbl9Co6EdE+byo0S/pcUJHnxoiVeu7oKuDJVedb5WGgaX2piDeJzEe6uZM9KjzqJ23RPlfdmu0kdeqhjGinsnpAWB25DR0VNw77qEM5N9m538h36WP9umnfhIwf6OQePa49oV4Ntpsf0aPBvLYPEI0O6zcjYjqNTDE2lS4y+ahh0vQoql1EzQxo1Tay/olaPOF55o3Gzx1OMAyNk1AG6zMeSM2yj+ewb0Hk+XnV3zgQoDLf+xbqUVQ8WG9AB8FT+er3SVdF+07eOUODxtpAbZAzpm3C8Z1Cw7h3cdzQTiOoKHxnKzzA+sDPD0KlsY39JETDFRnk74ZrMgyEQeqF58ePG9kJTHookyiyad4QS/9UgKMHmNaBzvIscStD+6XIM3EzM1EikUgkEonES47XngRBJ+zPzmJ8hJ02Alxgz4UAzRbpkF1rJjRdILeHCmh4ACrjeNjwcD7hfHegd0K73wALsQFcnG/KOhHdj+OnGV8BXNXQJpYwg0Y47ivomQzp+SdsR09DFlQUNRpCfRsL9e1eDKbVaOIKtCdwY6LfwcUxmZQ48DoaCTIMeC5CYPTzMN5jKlrSsB4jeGKojGVJcddsq9PSN2bE1ItkwTESpBwqQPthCcfpJ0K7K16OfVZrN0OyNkRbosDDDqKHinu7bFJuNOo4CNW2Ox4pZFWI0j1/YATQbFTOmWaUFJkM10BSFCOlwvwgmVMeLkXyPzMGu/aBjaPPQc1e4kan3m9/TxEHCh0/I7O6piLGWcMbtj5n+bHwMTXkuRcJl9F0zi7KqLfykAosz4Ke5GFoJ0Jb00kTPCsIab8YuSl9BpCFJfSi3jlDrLLvQnhS4YnkcyhxQrW7Jw2XQYJ6vVnJDkA8wmqow0ZTGFIpPIUsFXvfMKE1Qm9VQi3cK0nq0J+o5X6UyWuJ70mISAtX2xkoIXxNP83bxAmFEEJUTDyZhp0tc9caaM/ETB44YcAQYWACtg/ruIVMJuIxRvPzXOU5AjCIzqr/FdbsTDbQsbNHGQYPtwtdFucSq5eMp1sO14/QqdAm7ZP4DvJ72XmxbVoH84IDZNyFPB5zhew9oWTNlH7X/tZ3jhE3sU3xeXDHthK6h8fzHd9XN+HeYwyyFQFdz+1EIpFIJBKJVwVJgnTC9mMn0S3Q8JN25tmYAybXY9oLjsrwdJsATh8qqPe2iga4bOgbcH8nRMOmmhizYUducMpiPexQLtle3OZ145yHp8RegOcVp7cI2zM5c3sOr6/fLhg8/azhIm+qXkNh9NNsXNv9nYyoQ+fD4KmFzWi64Z5vngqrq7+tvVuXhq0hP6LTEBqwpJSc2tZphN240SeGSLkA9R6oD4zzhx4vo1fSlJxAX7On0DDMIiFRdmA7wjgxNNuNfK9GkCiZIp4d14SHZx6xPjmH2xeZD5H8mQy/Pn8Hjd16YBhaRrR1yy5RaNTBx5BUh2IYV/VefnNDVb1PTOPgeLPIvLDnpvLImAN5XkjT+JpXS4naIWb0hjkfU1Bbm4SBAVAK2rMK85CyOnl/BZ2HKelFDBNi9X7y8DMt78RC9iwwTZ9uWXwYQOUr6ZLouWHEl3sFFUbZpJxSGJ2HTgsA9AbXsuganmbZTgA4KUuqKWPeZ+YNY55m7VLEI+RB+rzeawsfaBjcfKOP+5gLfRtzjgs78eV9bUwxL/PPyQOS+xoxqM9TIQyiRq/lI5CvRi5WKIlAfu4Yx/EseHrYq7my/G1TImgMWb2NWF1DP4wE8bC/lZSzuhihE4gdkJInBJBmGiu0Tpalnk5yDmJkEDR0dT5vgVgpoz7WXs9GpKE58zNiRAyLxgtpFQJRcyi5nkgkEolEIvGqIUkQBu4+KKtHD9fYbuzgh9j+7V4IkZhWd3sGbPdqrGg4hRizhOPpMPjanX4+DcYEiys7iAYRs417Wz1BmHRLbHFenhXU54TteTB8y7LA1ygEDzWxRbMulo83Qqpc6GK4iyaIkx4awz52+uFu93YNb3EnMyzQo1CjdX0RfQnikXqVeRBL7qnAYs26m7xdryEY2EQngE8FOIZWSj+kv8shniD1QqjPMRku7qVS4aKbkYyiI7i5Bxd36+ftLZoMumh82Bi5CCY0/CeG9JjBY6mFlSSyeWI7yE6C6O7wuvtcGk2782XDMKqCMXULNlc
NvUA0VgLRMxmEagCyhk5JmmJ5ZvpWwRtLOmTrg8OMXvX+CLvwcoL+pyEIBICZZ6NO52pVw36rNBnX5kUypVNe7tFdh0J/4zEfrV2eVjrMcWC0398JEMJk8kqC9VcIW1t25PtJ6nDUMLedBUGwTlWjpQLdQvFOwRMFQorUrWMzUVHTINqrkCNPRci2PZcXW7kvEu5m47jMJWwQrwUyoWXti8KTl45oD7Hr3UyivB2e+rU+s4eM5hCiZkTgqIsRl/5sBFKihfGwZ4DLSIu9jqkTMdrGSK566IqFsgRidSKel+u42CTQZpy0juPyKXwGwOx9d4OQngSfp4NLOcbH87gWLKQZWTrjMl/rHjseEiPeKCu9R6CpHl4Oy/uU3oZ4TiQSiUQikXhZkSSIGkvRc4KaeEkMl+xgKEFDLC6M0nTHtBLKzk6KnN5ilEP+YyK0OyFY2onE6AFwvCEeJ2bcmrHsoQZPu2Zt0Xr6gjhYW12M9PogRmavQHuPGkQnaUfZSckCTIvlyUirjP5mA911FygkFdRrHzzL7n3Tuh+YEBfNq1Ck9B8PIUTAdVcAuGHE7lauRpGFiqiBKHoZWuYWSBfrn012y3kTD4RumgkXOLFD7yGUg1CfC+FiBk/fJKyDT4MAirDwDW+TGYP6Wz8N8uNmeEDM/mBG+3kYeNZfprXiXkGn8TtbGFEkkqx81e8wHRYKfRm9JKK3h2c9sqEyo1j/5g1DgwMQ7yIlANxDJFhTMs+GngxX8lAoGSf49U40RDJNiZ+pHgjXYvQXwEMUOJzPPlc0pAOYBT3VEASU8FLPJmsjNfleLuTZYcSIHfWI3jQgIdlimIV7BygxYCFJNgeok2iuWF84sUReBwBu5LtnVpFKdhWY5QLsKhKLKsQIgCG+yUKQbE93PHl6wfGmdN6xVxFoPkSQ2TWCrA91bvPGwNOGcuoSEmGEDOS9ULeObWsgYrRWsNXu00GEoCWMZ39zgxG3JWSNaY2wX4JF3gjluWUaIvcKcyLLjPnQN1NmoUhQcHjeFtLU5gQ1EX1ewwyjbs50D2hdgkAtn+NDjpld0DL9PWa6LZEkYfJ5ekWC+Dn6d/Bcke/k/WLE3hXRadUL7zQnIMPxmO1r9QziC66ex0QikUgkEolXATeWX4lEIpFIJBKJRCKRSCQSrx7SE2TruP+pHecfLzi9JT/VB0ZVoUzfvT7NHhjlkGPtTBJO8d5x/PRhEk+QfYgR9k3PtR4vsrPb3uzAqcNSs/pO3KmP3cWQQtE9EQhyfgH4Itoex1MGPlXcUcq5oR0F/LzC0jVSZWxP5DixaAkcH9LUvZq+tG6aCYQYvRD2s3iEWEjHFLtOsns6PFlIszVYHWnsqBZcx5ebl0gn0TUxTDumY0ezn1l3PMOONQBcCvjUQacuGRBcDLOMtKoAegcOddX3zAxF6uAu/baDHFPk2m64et5MIQQUdFKCG/tIscuTl4WFvkQPAg/B2Hj2XrDLNDOQ6VLQTpMHjosk2pxg6Rvf2deMI+apM8R5xz1i5iFL8Wk748cbOv+K7tT3uY1llzAVdMkoxH14tFgfmX6Be1fFNL+6I++itxYRAjhNK2PPokPSNdwGoQjzgABAKs4bQ3zEw8jaJ2PWT8HDpEufUSMXqi3R06PKOS5oaTf3cdQd+Rq8FgDwQ/F3AB3mrSOV/f/Ze5dY3ZbtPOgbo2rO+T/W2q9zH7aJYzuKrQBCiiVQOkhA2hBEkw4SNNJLeiCMkGhGQtBCohHRJDRogGRaSEiR6TiAlHSISAgiKPa1z3OfvfZa/2POWVWDxhijqv51jn3Pte85vvecOaRz997/Yz5q1vzvHF99jxpL2q34V3UMEagoa6SmsCx0k6LkKUW+P78/xFki+wQRwn5Sw42wF1x2A5YlIq0BZQ41RQcAaMrgWMBcMI4ZwUxcU+Zq4CpCNc5ZRF/PhFumiMVAD0O2+Geu2wqsXibuY+JJPNdZtWg5c7umzgwLgtA5r7Ldr3oMleBjx9cMZ3MKmuJl3/MqOSAvqnep6UH9dkyTxUHq90UI2X6jNP68vf68fCyKGQu5v0u9IewezX1SGH3xfvgC+69jgtzIdGwblNF8eARf+L3uCSuE7v7sGGE3DK8vGN5stdVWW2211VZbfTvqOw+CxFgw/eojzscj1nfaEU3vCPGskhcAEKYa0QoooLGCkHdA3iv4IPtcG2u6BgVKFqVdhwu15A2XUXgTaJGfADQ+tE+PSWac2vkc1MZ2KEAsECKkAyBTwfhqxvdePgEAxpCRC+Pz874+qA8h43t3JwAAQ1BA+MPdC1xOI8ppQLkELLGbElGAWFB2gAwGDLgpJGCRi46IOLWebnTkVAiSoc3eyjUVR7/CwJRBsWjzl/T71QvEYkaFijbDk3aJ0oEpNDPCmVEGRjlmwMEQP/6u+dGUl9Jo7QCwsjX2pJHAQjWWFoAZgjqIgC9KSaD+HbW5976m19Lb52+Akk6yQdbw+nlpYg7Vzwmjpe5YMy/dZSpRAKZGibd9uYyr+huINsrVM6MDcqqEw2j/EkXHD4AcSwWKioNVgs74lJBnnatlQdtZPb52DHVfPSBmjX6vaPCesB5i1usng0pCyg7P5ApNyoDSjq2+XQh8dZAOzd+lSoY6+YN7ciS6kWHcdJDhGWDVAT4V2CNBnkptdsligdnAkBswrTuF6tfDouatnV+GF3cSC5fYORhXBgN5MuGUGetBQZBpTBhC1nhtFqzUQAMAGMaEEFpznzIjpYCcQk28QSHIzFjc/LYfD6DOE7BgmBIcDxWLSiESAxFy3W/ggruDureyYlwQIaw56OdxW4IGxqxrqPvQN6n+m7mALMHHwQwFSDLSwPXfgIIvdRP2/RjLzXislrSlUiD9nAMd1M33PlXF035W82kBUAEgj20mm+wOwngikEc4i590TVtCh5ah+02xwbPrVFNyHAQp7bN+7fIoathKnb8NbufiVltttdVWW2211betvvMgCEHwy6/f4ZNxxbu7IwDgfDcivmfEC9WGM4+tScojUHYFcsjgMWN/WHDcLXW1U4SQC+O6RixLxPU01saMrmYaeGH998xdBKYBBdAmPF5Ms+3a8dA8Q/KOkV4nYBDImDAeVnzv5RNeTtpM7MKKyAVDyLimiCKEQIJj1IYockbkAobgR3iJ64c780VpT8J5EqQ3SY1H3QyxM0JV51euzZiIAiXSrWDSakDOqt4lutpvzcAE5D01AClCH+LbcFiiRqlRo/2KrFwi4hNjeNAV8jSTAlIHY7OMBposNtbmfxAu3FbXZ02x6E0SuUuY0aaygQnVK6NbPXXtfxnQwK7qa4K2wurfo7YvN3ekrqH02GDAjoP0zxuvC+6/Ty1u0wCFG5NDQvXlKCM1MObLGh0yoI6APOob6b4oKNI3Vv6fjUsZBRig5rJiIMIzJobOCTuX1K10SzvPm88TOuaU+mmUkVAmM8UcSvcdO3k28MYZDl1jmYl1LtpKOmUFcAAoqFIayIhYboAm34U3+RRKbVL7Egcnih4zBml+OJOeV3ZQ8zmLoJ9T3uRGQMxriBycIv
UjuRlL2PZEqgktpwA5M+azNu/zrmA4LCDCDTAQYkNXliUiZ9ZI76z3840vTtEoap+nFeDr5oLYn+thqGavwf1iRI1316k0tlEQxDs9yXFaKzCQUtAUnUINQCh8y6zw8a5jaEBtP7RBwIP9JgQxexj/kjFTbtgudp1SMGCw3wHqZ6Rw3TU/iz4GFETxcwmhoNjvHkMBjxLoFsDxbRdWIkiCMXWknZeehQ6+A30G0npsO9DA0/Y7IQ1Mt+sIADQCpWcoPbtnNxBkq6222mqrrbb6NtZ3HgRJOeC8jvjF+0eMJgV52O1xOU5YrwG0MGS0Zssp31PBsF+xm1YELjiMK/ZDi4rZxxVFCJc04JoiHqcJ6xqRMyFNOuSZB/DVwIEL14dPN1cNV2OjJNyAIP5QmvaE9IIQjgnDmHDYzShCeJh3AIBLGHAcFjA1KvmSAz69KNATuWAXV0xRV4fTBRjeU90/FSBPhPMUUPbFDBjFmgdr6lzC4w1Sl4IBwAAH6tJZOjYATCJ0CUh31tjGjqUAaNyqNUBSCDmxvre09Il4IsQrTL6grJvVWBxlz3UsneFBqYsrtvHWpAppzIlnvSlbQ5EDgAiUsQ5Bi+M1c1CVtbQNeIRwL5+5WXEFwFfURKEaJdozRUS+sOrrUhYCgNSAJZdQAG1byoCxHnlpwEt/EGLADHlyRkYdj/UUILGdi5uKOmBQhm6bfo+0aXIDMLmkSJktbf81HaSyG2xffpwCgFROVqKBPrFJeNzktCZi+PlT+75LeDSi10A622+etEl0gKXOxw7QExbADE0L990iasIRz9xkM6ymu7V6CZABKjflY1YIAlEgJEiLax4M+DEwzuU0NebXJUD+czUDnBnDo24g7wTpPkCGAt4nHXcWyKqDnBND7DcvXFjnoEufutOoGGhpc6U32fUqT6GmsESL6fV5UIZQr1sJwPJKfxcvxwku56Js8i1jxAF6rrHOzy/BJ/rfD38pACUoE0WiKAvPZXNuqlroVv6RbXxNogRu8z3bvHAGjrAgPzdrBbAMqu+iWG4kO0RSGTH1GDtAxX/vJLExilClj/UcfW6T3SwE/X/z3D6CAJtz+r70wJvfXw4YAso87Iyc3aR2q2+myquzV3siAAAgAElEQVQjzn/1r+DwP/5vf9aH8o3U+d/5KwDwnTnfrbbaaqutfrbqOw+CyML4/Q9f4zd++aPKohi44F3MmOcBOQXEIaEUQrAV3RAKpmHFEAqKEJYc8PxRsRgbJBeuK48xCjxeJQkhR5XN0JmbzrtrHCXYM3CfPNEXC/aHGftxxRAy3l92mK9DfXsYE8aYK/16XiJKbrp2ZsHL4wXXZdCUm1gPDySagjM+MNJMKBNDRqlJLoA1s9lSDqRbtffDW4HhUcEPBx1KF6EbrwryhCurr4oltDTZUevUKavvRBmaxwZbMkqeUP1X6NSkLBKDLqAmY2n4Ne9AgBz0+yTU0lm4nYt7QJQgKJPJDGwVG9BGgVZSxo74trvm5mzMg9SuqTJ62mfimUCxrayDn1uDUDtuftb8dQCLRtm2qNw+WaVKLPxrPUBgrBAmgAzI4RXgVeo4g1QeVs+xY8jkCeb5oUyTKid55m9Sk3Ci1FVxPxZOaP4YFbBo3+UCIAMhaW9X0146cMgBNiE93ht5krSx8+1zavPSWV9l0GYzDxr724M3QnqeLTK5OzfSZrjKVPox8r7S9+9NqcvJfPXe7ytrvG+OGbhlODAgQQ8qezywsVx41eadM4E6XyJeNBWmjIy8MiS2eQxApWWzfp9nugHl2oekxsMqa6gBW34OZOBJsHQRXp+BYFkQZh843YczRfLUwJGeNVXHpsYt4yY2ugEz7fo6YaIfQzHZWAWUuZMmdj/inHQ+hLVduxqh3jOp7H6vaUNo7/l9qv4zXcxwBbU6cL2Xt/ifiSv4pz8I3QH2IFoP3lT5lp4TRZUEudxGun2IEOKYIIay5ZVVkum7Oocmc9pqq6222mqrrbb6FtV3HgThBMQfTfjkzREvdvpkPoSMgwELaw6IrEZ/zhRx2vSaGSkHpMxgFkSjRC8pVmnMmhUIESEwl2qoN764Ih8ZIsByHiELg5bmCXLDNgDwHGUpg4CPCXe7GYEEqTAulxH50bT3ibEGwWWXq6Qkz0EfrL1EYzNFCPl1hoQAXnxcLHp3BsZZn6BVttGOhbM2vt6w1qakG1u2RqgM2iyXqX0mzPq9eAboCSrr6A7P42edDcOrII9UZQrpAKz3gmQr1fFC4AW1weJT29Z6p9tLe1FfCX/WNwPRuuqcfcW/azC8URmUVj/EAu68AnI2ir6t4FKQ2pjM16AgyKr+MHnSbTeDW0GyeNAaWdqZJN40od50DXLzmvuUhJlq81VXi6Esh+rl4gakvQeGAQ/uYUNiMiFrTOOlNZXVGyWhxfLmFiddVqX56xu+f5XLaJy0nfsNgCAWg0wNCCl0YyysDb4ehzfX8IYbt8dX2QAdoEbFQZpbsM4bV5cbOeuH53p56vd7T5N6bTomis9f6fbNz+UagN5LDoKYaW0dL4LJcnxsnu0jOfsAX1ilr8yMjrkhsTW+QjpH1H+FlVHTnQPP1EAMkgaMdtdSYCwZxo2RbzMCtmtV2n3oZrN1PO3aakSrHpczn+KlPyEdC78mfg4VXLPIV2k4k42DzpsqD+tivfX8BCVSBc4k0g04C9hvTrHfr46J53X7d/vN7oDTCuCJAmHq02IvOVNpCCgBAD+LjPbPSPOv6eVTwqLn7nPd4svlBiSz3xn37QlyY/4q9X9gniQCHnAL4F7Djb/JVt9MOUOirx/Hlnj+nT8rdsWXHftX/c7GCNlqq6222uqbrO88CEIZGB8I794dsd7rk+39bq7AR7CHwF1MOAyKEKwlYE4R8xqV7ZEZOQPRqO9FgEjqYdGnI3gyAgAcpgW7mBCp4P39hOsy4DIPSKtekrQw2DTlEOgKHUl9iKVYcLxT5spljVhSvEkgoJlAIBRrGPJoGvJ+1VCAdI0Iu4Td9y647saW0iIAnwKGR8ZwamBGTYmBNsFlbOCIrK1pBGz1dNLmMx0F6SAou1Ip17QQwpUwQv1Pbhpauza6kmwr57bC781GOgrSy1RXUcvICFczooWBLLbIWgaTA7zKoH3CsNPOaBhyBbuAZpDongEpaZd1k44hbRynKYGr0SuQkm6nuInh3qQ8mZETg0c9yd6oMd1FsKdmdCa4gLKO1LOAIFlBMx7KTXMiZga5LhpbQm7Qa0BNHHJLv4H6KjBJBXJ0G0DJAescqikvm3/N8Eg1ncbBB1+NB55dc2va3ZwTUKBKdhkYBBQKeCiIMd94MEjheu5i55TnAJq5zhVnS4W5Ndo3TA1u17vOI5/udsze2JeI6iEJ6PyGWMNt7APO3eJ86M6vr2csjzLY/HRZUHe/uXTEvybZGuge8HKDSmdB9ICGSyLME0aoS9TxfVhjLGQMBzZA1c/TpDO8oLI32sUz0CS2/rqCRB2Q4kBoZWH0oF53znlHFXy7Ae4INWXJjyec/Z6lCn6AFCTJPjZAA1o7cKj0MiMxBkzHp
ukTeNz0uIIyDnA889DJBkrQ2ACRfr7fpDPZnzcpUJ1MyGVyFXxzBpP/lpEmjN3KuKibc7egorDcgHuU6AvGyzVxyoCRwmJsN7+Q+tn1GlBBXmcpwX5DnjG5ttpqq6222mqrrb4t9Z0HQQBrqj7c4fGiw1G+x3h5uGhzywUpBwQutdkt5tifDAAB8IWoRPfi2MXU4h0BZAdBhhXHuGAMCcdhxrKPOK0jTot2LGsKmIaEXBhLClhTQAjtGAAghoxHk8Bk149XE0Z/6teHZEmsD8RugugP0axRlm/uT3gcUm3imQXz/YD5MKK8DVXWcruqTbXp8+arellAGQDrnaBMgnLM4F3CEIseKxTYWS+hghec9CGdPU2jehJQlcuUQVkFAFDuMsIh1cSSTEA+oJnLJmu2vOEYBRi0Cd9Na71OMTh4RRWc8HHwKM6SCWUJmvyTUX0a0l1CGHMFTXLiClwBQBwzOCglXWJGCIJ1DTVVQgrAQV/3JIqcuW4vhIJSyFI6FDAgbgCGgyQiQLG/9+/1czMLEIKART0K/H33KABWpClofGrh6qky720snKlhTaabirI3fBazK1ETk8rBjuN+RRxSBfA8QrW/d4RzJYcQ6TGlIWCtXg5c9y+xMR5qVWAANX63emVA56aDY+5nkmNb+a5yDpdRpNvvi0ul/CU3mvT3yZrTQUE/L+qMfh1AbJINuokK7tkO+oXbc3TAg0waRHaOVWbEbTveEPfSK5d2wcCjak7b7b94M2xjduPRAm/qfRypsXuqZMfBEVFDZW+6PbFJcJOGle3tfDSwa+Y6tsIWhdywXQVTGMoe8vuMGoBQTWV7ZlB3nXhRhpsnKbHFTZdBbuQsVeLCJiPqwKhq4ittTtyAbwKTJXXj+AyQu2GZ2LacUeLH5syRL/itGIjiUc5Atx3/KFOVW9VjLG2ugIzF55tlA04MrJLBPHK61Jyt/uyqZ1g8Z0x8GfviqzAyflrMiz8J++OP2s7GBtlqq6222uqbqp/ZJxwi+teJSL7kv3fPPveaiP4bIvqUiE5E9L8Q0b/0Z3XcW2211VZbbbXVt6O2Z5Gtttpqq622+vbVzwMT5G8C+D+6f1eFNxERgN8G8GsA/gaAzwH8FoC/S0R/WUR+/6vsgBOw/4iwnnXp7TRowsoQMoaYkQvhmiIWk0w8XieIEK6XETmxrvIXwsllHiy4xowQNDkmclHpdmmY02UdMKfY9sMZ+7hi7KgUgQsuacCJRghQPUcA9R1ZU8DlYQcsDFpJV/F8lXXKoKGoVny25dlnBno85hrPmwvjOC2YWY9pjBn3+yuuxwEPhwPS5yPixWQnsW3DV/6d4t7T88uhgPYJYSiYxoQYMwhq0AqY5GEowAtgNbYKBWmRwWuH0U0ZYSwYxnTDYFjXgGTTuLAgxNJYFLEo62IJwMk4/jMjzyPeX11TQzfeCmSr5uL7dq+KRIhXUgPNju2SnlhXkWGrv75i7EyRQ4GMUjX6KwA6h+qhIALIKFiDXjtfJa/HY3OrSqLsUIt9LrkUx6I4AZW1AMBqhg5yjjXNYg1ya54IaIRoLCBWVkpag7JDfJyPbeVepSu2Ar+6VIVbCo4YY+eQEe51KTzGjJIZaQnNeLFPFQL0iybfIBZQvB0HwFb/YVIG6WkZ0ChZYyCUUSp7oJfq6Oo7VdaKmlXatnsDSJN8lBb41BgON9qf7p++0u+fIWel4NZ7xRkEHZPluZzlxg+C0PxTun/XOfhc0eIsEEaNqu6P1w8BjJok0ss8XD7iepjeqNYPNsztXJRtQI1NQm5wTJUhpr8RNlcS6fWJ0rxpnDkCqGzKmR/OGgvS5kpogy794LmMw49D+nuHahJLThaxbZKbKglyk1ofUzONJRb7vFQ/pfqdOpjoKBXd37t4W70/GtuIijKTfFzLKLcMIPMoco8V3W//vrGTismdOgkh0DxRnDVSZVJdAlae+usKlYmZ2bHKxuT2Ht3qa38W+Sr102Re9PVVWBg/rX3/uO1vjJCtttpqq62+7vp5AEH+LxH5e3/Ee38NwL8K4K+KyN8FACL6XQD/FMB/BH1o+fFVgPGx0dvXhwEnIfCYsdurD8hlHquMY30ykf1K4IVrlGYK7nwHLFGfPJ8sTldEvRhC58OQU0DJhBAL9ocZuyHhOOr+ptB5TZgnxZpDlRBczyNkZYR30dJJ9KE4Ga1cdhkYURsyuvINZVqioKzaiF5XRloDXr0438h6Xu8uGA9P+HRa8FF8gfWqzTRP+qQdvFG1B/2cVXLjprCHKSFyqRIT91fx88pCkDFhN+izZC5UPVQAYM2hmvkNIWMaEt7szxUMelwmvD0dEGOpHhz7aakGtnfjgofrDu9PO8yzGs8ODwHh2mjhPBM8WhUE5NGlFG16OGDhsa43vihJO9+aNOINtN1ZeQoaIRu0OVQjWKqyIcpAHu39UTRppU/scNAjCshNIKU17dI1XuIyAY9ONT+N8YFuvQ+emV2WYDKOKFgPRRvVQRtVAKDJwDIHP9yU0001ojWbJBXUAQPF7pf5aQA/BQwXSy4R1LQcLx8jMS+DPIpup/OaeJ7g4Y2dv6f/tuPsxqGOTw9AeLPflaf3wDZDX8aTM3CiBzT6Y1EQUIEacr+Mbj/N0FTlHs8TdG6ASuhY3yR6eGqKvZbHZ5HIDsS4D0YGgoOz2c+fWsOPNsdgCUwVPOr2+2wI2vmmhvv4Z72RFkY1QHVfCzVTpmp6XAYFH/LOvj4YGBC7cSO0Zrz4C917gop+9EBV9UiS23sKUSqwUsGvL5i9AEikx51Jgajes6OaDzcgsV47wg1YQyzIN/G7us20KnjdG/nW08pSpU/FzYG7n1uPCK6+IoYlkUd/m3QMJpkRud2+sMkDpW2v30eYgXXEDeC71TfwLLLVVltttdVWW30j9fMAgvxx9dcA/IE/dACAiDwQ0f8E4N/GV3nwIDXvzGZOBwDhSqAcUYaA872xE65B01sADI9sxneaRuJ+ATUdIOsDpjdVaT+YDwBhNT8LEtOni372cT/iaZdxeanxCB57qzG7+vS6LBHZvCrkKYJnRjypYaVGmmqDDQDpjpAWhnsjDI/agN7EpEZWv5AoWO8inoaMaAACZ0YRQuSMV7sL3o4H9XFIjGHSp/L9tGKIGSmzMlPWgGlacRiNAcAFqeh2RAhD0G070ONmpAJYjK/5kQQ3DS0VEEmFEQtj7QwXRs64381gkuq1MnCpkcAMQcqs/h5FGR1hVhNWH4hwRddIoZqv1jEKvrqt/86TXeMuccIjgGtjgtYUxgtAT7Ytpmb06tOvADFA0yrMY6VPnvAmJO9I922AkzexbA3t8zhYngnxasdw0qaqrsrbajBZdy1mwpgnwnqvq95pD5RdqdcHNbmFNFrVm/F6oD5eNr+vDH7QExnfMcb3QJgFlKU26f04azoHacJKBNKOkHfA+sL8WvYFGAqEGMhQU1G+9e0gtKYbxVKE/BidHRGV/eFN4I0niAEjZKvvPRDg17Yaw+bOZBJQ41hnUFDnMSHUTtMAJKG27eemsiQKeoIE4siOb+A5QAID
tboB6NkcvfeIHiPqnxKoASXd6/28vkm/eeb5If1+5PYY/Fx51Rjsm2stABOAKyo4IYFQznaKE5nvi6BErtGz/Tj143Vr1OLHcDuoN+DvF+JsG1hWfU8yQSgoUGnj6Gaxvnm/35zJIqGB6EKADEWBEL9fGJVZVQHLMUHs/1NqCpWNUWUMFf3LTYqQzc92vt34doyd5hdC3cm1rzljCgCQ6cbsmBPpOXzJuG/1pfWnfxb5M66vm+Xxk9QfdSx/GobInyR1Z6utttpqq29v/TyAIH+HiL4H4B2A/xnAfywi/8ze+xcB/J9f8p1/CODfI6I7EXn64zZeAnD5gSDvpDZUlJWqHK6EVAZIEMQL1/jYeOoiHu01ZRHYX4s2lM4aGN4bUyPiNj5U7OE6EPJMKJHxZA/SlymDQ9boXWNc5BQ05hZAuFgSylWbtzAD8SIVBIkXQnoK9SE3XCzRw3dvkgBeNS4yXAjn+x2Ge821FCG8u+7xuEwIJFiXWM1BnSRBBAwGmixLQFoimAU5tuVDjxFmEgRWw8/7UfdRLGnlk9MdrmtESgHMpVLamQuygRhpCVjGiPM81Pf344opJuzjqtKYEnBNEZdFEYunecTpMumYsSbTrHd6Dbxhccq+Mz+CAQfeJLnxa95rZGhxw9ludT2v2jSUKC2y0xeIL4ThEQhLW3X1lXDfjyc7hNn27wkgAIIBJmk1Q0dymYQDGQoe9Oa0ACooBngzS5CiwAdnuWGz+Jf08wSJOjmddZT9I07nB0M6GoWwNMmHHfvwwJje6ufHB1FTXTuWEvVzjmf5fcJZIAmQVcGNPDfAZo0EGQHaZWWdFGgkscuWzLBVmGoDXrpEkptVc9Lr2Cea6GdsbDtwoPaObgjbyQ5YbgGzypLRQWrgGvpt9OP+rAZ0c8Q22G+fxdgc1DEROgyAReVdovdmYz8Y2OXj7cbDxjKprCRnBHiTXg+8nZeyV/S8nV3wHINwsIjWxlJ4DqbUuer3SY3m9mPoYoR7MKaTBjlo+QU2jbN1fAi761g8xjh04+HXxYfRwACN5W73Z+kYVD6PfH5KDwqygpoSDTwLASVKvedhpq4ylU7q0rFzhCBjqdHXVebYg7OJ9FoDagRtKTA1hSeUKlur8d+FIM5OETflbXMsG1AMADLjloW1FfA1P4tstdVWW2211VbfXP0sgyAPAP5LAL8D4D2A3wTwnwD4XSL6TRH5GMAbAP/fl3z3rf35GsAXHjyI6K8D+OsAEN68QvmVC8Yh4/q5eYG81WGhRIhPAIjAS3P7d+mDNw1U2oo6YA/E9gBPRR/w40mQ9gTHB5QGDiR7UKYVCIkg783fIgbkKAAL0jEhxIy8MGArhzwTeDZ9Ptoqb7zqsYQFLVnBKNHP2Sq8anNKWRAvhPAuYu0Al2WOECHEISPNQTXxiVDs4fuauKWpLBFyDZiB6k3BrKkjyZJt1qxeE7uoy80jZwQuWFLQhJvMGMZ0o+vX+GFGWQNyEKyz7gcAzruMab/iNI41SefpMmGZ7fpZo8axIOwSQiyQN8ByGTQtB1B/jqQMEYgCVgCqZCRPCpCVXdc0d82bbkT/o53rLFC9L+gSUIagXiK2yl5GIO2l/r1S0C9UV865qc1BIjUaVr/U7btLBKnAhgEMzmxaj7hZWea1azbR5ofPI1qACGoAH7jGbQpbA8Z0IwVwyQYJgRZCPBHGxzZI6QCkvaUJDbZa7nPRpDEeReqMhLAA4/va4WMVQI5Z/UJctuJsjKyJTWVnIMjMlcliB6bNsc1d2ecbZoUAOlkMYCmw5BD/egBQpKV82Dg/T3JxYKfNldaQCwHIAHdyHeHmBSG2Hd9eBRi6VXxhqqwel8e0aFeVTD33qqjX2QGEIi3xpNspZQNGDHTwJBlPGalD2YFE9beul4rYjjU5pn/NZF3SgEIq5tFi852d4ZPb3/03zM+h9MwQAzmeg03wYZPuMwDYr4/Lwny+dVOlMiJEgTn/PPvcIdsOtc89nwuezuLHJoFaapDb4kzUQMt8y9JzRgygjI0ycE3+0i+33yyV4enfK2NtJ3XOc0Jj/vi1lg7AcYnWDeCGrVp9I88i4/7VT/3Av231kyTIfBVmyx+XuvPT2sefZvtbbbXVVlt9ffUzC4KIyD8A8A+6l36HiP5XAP87lFr6n+ILa6216Ete67f9twH8bQA4/sYvyq/98DM8zhOubxUEoY4xodKYbpUU1qx1D7+VMm8PrXlnDIJB2SUet1lGgKyRFV9t7RgHhK4ZWNho1YIs1txdA/iqT6fBABBeW0MhoVvpywAn0ZVSH5GONl2MgSJBajTj8EQoewdhbK2fBXln5q+xGJfdKhPmeQCT6PtG2U4OMJACIRq5SlgNnPD42RAK7nazyldIEIeMaUw3McCJRbdDgmFMmGVEcQr5zLieI67DpCavQgqQVLNDAR8Sxl3Cm/sTDsOKl+MFn88HvLvotX73cERZGcUYNnniSncHgDIWYK+mrESCdIlqCOoNSRCEXQZzwW6/oFhssnuU5ClgDgPSE1c5kgxSY35lMsq5ENKRkC5sLA5fIUaVWvhqeW+qWRxESwrGqc+CSknyXj+zvCydnEHUQ2bpmnNGjRPmVSU84drSlCkTytR8OyDq9+GsGDfGhQBIVBvs9aDnkA5AOgjyoSgIZ/OkSlEy6TGZsS4vxsSaVcrj8304BcyvGflY9LtmIKoDgeZVIibl6U1gAUAEUhi0MugaQEtjckgUBSSc1cJAed4I9h4V/ld7rd6DHUNDG2z5QnPsPxic5BmIgQZWufzmhumBBkT5Zrp7WkjU4JPktomtDAW7ToUgA7WIY2dE+Mf78+skXv4ZEVHDYDvPEtr+btgnpIBFL+9yAKOPtHVQDgDE43dXm9PPQY4CO0c7Hrn986Z8Fw5YwH5nO0NYNp+UL2OrqAyOGmupe70C4I579hG1DkimDmBcgUDt+4DOaT+nGzYL+W85KnNM4rPr4kCpdMBh70W0o3reN1KuLwNQCdWLp/fEEeou3He4vqlnkbvXv7xRb7baaqutttrqG6qfWRDky0pE/j4R/d8A/hV76S10BeZ5vbY/P/9x2xw4YwoJf3B5gWAsjHAhyIC6Il/ZFm5Q5w+8vgIYdHWWU6MaU0H1HSgHbWQltofU3hizeFPMrTkIV9tGAWhhZWQ8hro6X6UTQF2ZvlldX6HMFJdWiDZkbE/SJQJpD2DfVoF5AfiiT8ZijU2ZCiQThv2KGMuNsauzNwoRpt2KPGgijjNB1jUgZ0IphFIC5BRBmXA9t2l3fTFgHBPGMeMwLSabMR8IIeTCEDNOJRIwC57s+/wUwAtBiCHR/ERS19gOUhkZgQSBCu4HvZjnVZdMPT3CG+ZyLzcdFUVB3K2YpqRpNon1O97ABzV+5VBUtrMGpDnW5ptYEO5X5BCR/dhsBVxPkur+ZFeQRgEtjbHg/haUFQSp/g7eNHKTvlDRxjSPwHonKOZfUiZfBgcwFgiJmpb6pQwCJFaPm4uic8MTwIse15gVwMsj1WZKgRZ9Px2lvs6Lmr6mA7C8snnwMgNjack7ngz
TX6fRfGGKgjFhYsRLY18NT4LxQT1v1mNA3uFWzlJXze2+2xXQlW+lGlEBIJWRMeK5MTfS0eROo3l5xOYX0r5v96cDD3asehIEODhVjPXji/H2dTFmlzN0wHTL/rAi917x7wRnIHT7pi/5ntCXAyA9umGmIdUvJUAZZ34c3PZDRVk9z416i/3eueeRsjWk7QMK3niCyY0pKWuSUgWLSNOgqjyMdPwDE8gBvmcAQRnaOT33MfniOBrgW+camszJwM4veKLaT0IFTkapEq46jBkIi7GlDEy5AWbsvzKiARX9dOqAF3/v+T3tTJzg7CP54vb7uci5AW7x2mQ//We520dNEqL2/xWNyYSOCbbV8/o6nkW2+mr1VdggfxJ/k580meYn3ceWfLPVVltt9bNVP4+k137F5R9CtbjP618A8M82De5WW2211VZbbfU11PYsstVWW2211VY/p/VzxQQhon8ZwG8A+O/tpd8G8O8T0b8mIr9jn3kB4N8C8N99lW1mYXz0dI/zuz0mMxUNM4AV1aySjQniTA9Aje+oSEva6CjNPMPYF5b2sStId/oa26ouXXXlL541CYN35tbfeR04TRmiDI14smQT2wevKnnIY+/x4MyObtmR2r/jRf/MU0eZ7lYqe/8AN0+VTIix4LhbcByXyvR4Hyd8/vkdRIDhmDFEjbGdV51W6xo0BnhVLxO+miTEWRKZkNcJ15eM/XHGLia8u+wQqqdIWwJmQjU8JYvolat6lFBBi9CkbuU5E/AUcV0ZHyaV9Dy82GHggkeTw8g13HhDIJi0p/TLuirHCSFjnQJyLDfxxYB6qKyZUK5RGQhmUCgsGO4XlKHoXCkEWhh89WVmbqybXdGV8hHN4BCoj9nZjBLBPQPBqfDGHALMmLExAmh1iY9Kn2h6boIgNeY2B8Zqnh/lovuI1zanXZ5QQqPzUyZNQzJpgQQgvSgoR0saOiR9PbOyQGZlndRxD9IYFqznnkNBGfuLSZgeBOODYHgC1qOZoPplMsaA+1AsL4NJfOrXkQ66D01LAsZ3bYzSiZBHvSfKoJ4KHiFaL0M3prUqy6JdA2cnVJ+H6hOjPhllMKnIM/Y8dfPQZRYCqlGx0nu4SFvFr+kugppuVJNhMt0YxLp8z5k7wp3XDaFJZggQab9vjc5COjefMxGqXKUxQnihL7JpXMrTe6KYXwUAwLySslFo2ORgTVIkliTU9tunmvRhMc6MKpYIBNjf2ZkX5pfRy5FsfNs2UFOExCRsGg9NyFdW36WClkoE3W6Vp4y322oXGHUMq9eUO04bi6ZEZ1fd/jbreUr1HXEJS6YuEaa7xd1jpQvWuvF9qd4laAzFAmCc6Za9slWtr+NZZKutttpqq622+ubqZxYEIaK/A+CfAvj7UDf237TRJcsAACAASURBVATwWwB+BOC/so/9NoDfBfDfEtF/CKWc/hb0EfM//yr7WVPAZ2/vgJmRzeNgfE/gqz68cm986g+bkzZKnKiZjna07bCoFwivMC8Fqg+03vgwq1dHuNr3yHwcdnZgQvrQHQBkbTjVK6E7eAbSDlhfSI1FbRRmTX6BmIzBjnn3mTcp3UO1NTI3uvdRQBcGrQBKwDwpAFHk1hZE5gAIcJ0HsIEb11k/m+YImQNoZfDZfEISKr2fChBPjCQDzgZinD/f12MIUwaRqNeHAOVp0JSEvnnuJRHPaO18ZsQnhhCjPCjv/cOXE4YXczVfrb4R3qiwmXw+o/cLACLBYacDHE0WNKeAeR4gmVR649T1GkdBKIWrcShWqp4cPgbCpHNP0M6ta65rUzrqBaMoKA76FAJiAceCYkCPJPW8gHvYzFwTN0qm9t06ULbvIEAQpFcZ+U6NdwFNeonXJgNwwMyp9fEMlFUBmrzThrNMpUqGyhKAhavvhxv2NmNWuo0uNV+Osi+47s149xCw3hHG9yqNCYvcSBaqWbHJwsiSMdwomIoCJ2nfvE3KgBu5TSRtWstA+tlDN7WKggkqxWiNdJ9W4jIFTbpRSQxRm0MggEyGUn1U/Brbx8ivd/FzkG5ak0bzRtHEjyg3SUTNBLTJiKiY7M3nlEs9rNFX359bEFA/0x0fAOrcWz0W9qZD9s9WmZegeIJRB2LU6Fg0ALFQuQH1AIBMRqMmzFL3JebXUo9BLNXEAQYfP91BvSbuwYMAyGTalZUaQNj9BvQAJC2k32HR3x6g+g+VyCorE4AWbp5Ri89Hi1GmBlwBbfz9eN0cuYzdve/jLFTjt3tQww+2Jt1EUTC8HgNVvyi4vKbzr6ryo6jyLE8t4tyuQ3zfpu53ub6pZ5Gtvnp9nXG+X3dU8Ncti/nTmr1utdVWW31X6mcWBIHGzf27AP4GgAOADwH8DwD+MxH5FABEpBDRvwngvwDwXwPYQR9E/g0R+b2vshPJBDlHbf4+0K4uPQ0YzJSvBEBGXXFzgGJ5WWoDRZk0WeXaklr6eEXAfAxeCPJeKgNgjZrQAFYgZHwPlDMh7/T9tLdGkGyFedWH5ew+D/YQm+4F6WUGH1esSwBmfbpOx4D9R7qSt94D8wcFZV+Q974kCuiquDeFgjyhrtxKEAN3CARCkhGX04AL72sTw1MGnTV+dnmnB3YdC2Amo7QSwmJjcyHkvW7TbDpQovpcDI+MPI84XwLiY4v1zYegx2ENxvREkBCaaWnQRllG6w4G64DdT6OotwoAFDP+JIlIY8H9qzMAYJr0IpXCSInNv4Qr06MIYRis6SmahhNYMIa2LJszI5Mm4ZSBIQUINkZ51e6HatOmDb/7Qrgngdwl8JhRLu5s2BpIn1NuYCvdijAA8FAwTgmlkJq3rlH364BW0khOVKAs3DY3brYZFHSTQwK/aCYH1+MAvrI29cZG0bQaY4pcOkDDzodWbtchkxoEd6BbnuQLXg7VYNIjWEkQXq42FxLOrwKWzyP2n3TMAGe7GAgSZjFTYGVI+Sp/mBXoABHWCCwvBelA1VdneK/gZbjqNnhRz4cbJkggi0al2lx6yo8zDSQ4qcGTWqiCRZSAkBU8zSMgg/XsveGq/3YkqnG4N6SdbCy0bOBR11zDtuUNs8e6UsdqErbbKwMIuGVmFTImiY6TPAMb/DMoDhR0k8hddIMYEEIVwBVCMxKOAvL71YGNkashqif4CFBjXfFsV/oxaUAClQqotDifbkwSNbCUBTxlBX2jsbAS34AgDnaAAOmpP31CDomyp1jpV9KxxyQSZOabNKkvxAh3AJ57f7RBtn0HUSDHfwscwHA2Sya9d0nBDNmXxgpKCjqSjT93JsBAA2WEzD/H7+s+pedpQ0CsvpFnka222mqrrbba6purn1kQRET+FoC/9RU+9xbAf2D//Ql2pH/QVBB32nDNHwQIa+MtURudshOUe31CfPHBCdOQsKaAeY24POxAl4DgbIcChDNZ/CzqQ3J5uaKM1sFbKshyCdh9FDC+B8ZHgVgaxvzKVstXqg/F64uC9V7fV6o+QYYC2mX84vcfAADJIi0eTntc5Q6UCMv3Mj74lc/xan/Bj773EgCwLhHpGkGPURNKhqKN+oNJTlY1uPTVQc
pcWS/eWJYpIqzKMKGTO77ixkBWQRBjDVjz6AaZZRSUQIhXQrgA5Ry1UavP3tqchFkbLweaaiDICKwrIx8sdUVgRrDt4T1ParToco08CSgWvD4opeZunDFywjUPeFomXFPEmkKV4pTC2I0rzvOI6zJARFNtrsYECaTpNcOQQWOqfUY0kGTNQc1jBwVYVus8inU0ccwgFry+PyNwwSfv7jRu2IAkEWVp0MqayJGpsU1s/hZEzAaAEEOZHtyazWLMEU9hcdPdtrJsoIM11Xlk8H7FMGQb54ScGGlRgIrMWHW96DFOn7SfETEzy3imG3lHNWKMei/Ri6Um6Eg2wKSavmojTCurlAq6+h5eJaRDwmmaKkvFAbHKwpq14aOs8pf1zoCaczM4FlLjWJlyTUgZjozhiRDPaNHRNvZ+/Loj1ChsXWF30BIokzKTJAKFCTIClJsUI1ws5edqLAFjpNyAQUZ66BM/bmQU0v6rq/vPUksUZEKT9fS/8t7IF6qyjrpp8n05uGK/MR1rpbJskm3DmQ6+j9AxmqQBMJUJVQiSCBRVdkZBGRYy+lwgyKLsMrCywLC2Y6yAQGEFP/2tigT5tZF2bExAH28NaOLUUFAK6T1WumM1SVHdBut/5DHAhRTwKNQMfjugRwgoXFAqIKFgtIMkPXPHGVA9q6hKqCpb5JaVI0MBxqIpVWJfCAKy3xL9irHnFkXlyo4qgOU7qfMrd69356iAH77z9Y09i2z1naqfJO63/85P6/MbS2Srrbb6rtfPLAjyjZY9oJZsDIYfXnHdjbqCuCsY7hbc7Re8OSp74J87PuAYtaN6WPf46OU93l8nXGZd3k9rwPVxBK2M+KgSiXwouHt1wXXSz7x6ccYUE+YU8enwCvJhBH1KiGd9CB1O+vDujYxEIL9M2L3U/Q5DQkoByxIhhbC3+NdDND3Ma+DvpV/Dco04vrzi119/glfjBb/+4hMAwFoCPrre4w8fX2CMCbuY8OnTEY8PCpKEc0sPKUHlPS7ZCCblCItR84kUqLAG31f8w4IKgBRr9mRoTR1lqswBzrpKno64lRjU1VVtauOlraw7EycvCoLknXlTdCvreS8og0COub4WWCpYlAojEqMIgUkZHkPILe2SBEyCE4DlGrWp6BqiMBQQFwxDBjMQuKAI1ZXp3ZCQA4FJ31sPjFIYQ9TjuZ/0er6ezihCWHPA6TpiiUM/PZGvUa9HCg0YA0xipZ4rxeQsCALaZeyPuu1SjOGSGXllpIfB5Fpt9bxekwKUC2PlEdnkN8OUEGylP8RSJUpL1J+PZeXmyTEA8dKxPmAN+c4YO4NgPC54eXepbJs1M+Z1QCmEnBnrHFHmALoyYIlNeQwo+4Q4ZqTXK9JiDaAzDBjgIWs8cybwY4SMcuM/E58Y4aJzFlHHCAd9fxkD0l3QaN6rym5UXmOA3UCVpaHjpKDa8OTADZBXUjaVs0IGu29deuBJPsliswFIuh37Tvlx81p7Ac0Pw1KqaqIH6/2VJ2Wd6f5LhxqiAw1sX6nvchUUIfucy59A6CQioolV7kHjX18dGZV23/aAjcssrgwwW+KKaGrQUOr8EgKKaCKVyoIEJO23iJcmQSwOVqDdD8LKehIDUdTvBiADLaUQyqo5vVR9aATkQBygf3c2xlBAowLEkjvQzhKOKFFlqtTYX/+7+wINDkA8A2IyVfZVeSY/qtfoxuTEvhgEYSwoQSAr2zxQGVy7HwQ8FORsk4UFUgTk8eUmseFEwIKb30w9NtxEC2+11VZbbbXVVlt9m2oDQYQQTgy5MIrFrP7gL36G82FAKYxXhwve7M+4izP2QZkikTMCCSJlHMOCv3D/GeZjRLLl4iUH/P7jK4gQPvv0HjIz+JDwYn/FftRt/MLdI14MVzAV/CMSfDK+RN4N2H+kT528mtfC5GwUgMaCP/9Gk/ammFCE8NHTPR4e9/j06Yh1H/BqUobDnz++xV/+5d/Hx+d7BC747HrE2/mAv/TyIwDA98dHfH98xJ87vMNaAooQ3p4O9WE4Xk3PXk1GUSNR3XckXoyt0q9o5sZdd1M/91+gAmWKeEOzon6/FuFm5dojLsukq7XpdOt3EMw0EKJMkbxrG8xmMEpJmyIyuUy6RHz4mYI9HDJCUB8QMenLELLGtQLYjyvmNeJ6GVHOETRzNa8FgBS1sc9D71rY/jrtVsSYMcWMXUx4tfM5pIMQqeCURnw+H7DmgDFk8H7GYrHAwRqr+RCwrhHXMGocsDdSZi5ZWRdBEKaM3X7B/V61HlUqAAUcHoYD8hyQO9mSMyIoE3jWGGBhfX/exdo0p7HUBtLjh+UuaYNozIBsKJbLAeSYMB50HEIouN+pCa5XEUK2+2ItjOsyYEkB54c9wludDHRhyGPAep8VvBiKNqIud9kl3f6hgFlwGnaQJVQT3d1hUXDy/Qg+K5Akp1ilVDQVZczcEdZESIegbBb3rRi0GXXfESrm0dNFVnPSOSgWyZr3JmGz+ybvpRplBgPuatw20MxK0QEd/b2BBnSU0K55qSa8ug/Z6xhxEOSFK0BBmRuzZTQZR9/kOjOkA9ngZrxeUwZScw4li2+mq80FAxZcLqQmsVRlaSo/avGteS/IO0G6M7+boSiA4WyS3uuj+1O6e5A6pgiMvaDSHq7Ag1T6mhoTA1CAyJkopTMV7eKpC7jJgjq/DD0+HR9n7VSGEOnclw6QqAAJoIMTi4IWzjbpzzHbAHJ30kLtYiVCSR09SFCPrTJuWKPJK8PKfssbUNPmNpvMrQcuIU16udVWW3099cf5d3xT3iQ/rjbGyFZbbfVtre88CEIZ2H2qD8W+ohp/o+DPvXxA5ILvT08V/Hi3qsPgR9d7rMYamXPEm90Ju5DwZlSmCEMQuSWIXJYBRIKBS+0t5hRxpgFjyHizP2P8pYy3Lw542t0BAHYfM4YnaArMQOBRkK8BT6sySQoIx7hgP6w4DSMe3h3weNrhw6h6mbcvD/jnX3+ISAVv5wP+yR/8AJIJn52PAIAfGggzhYTHdcLDssf79/vKDuAZoEBtxTjrKmzeSW26sj80l271G6gghDChTAZGBKBv3AGXBaAmH0hQz4piTYGDGy6x8eakNJJENQJUhkq9qvreAm1cM7BcB21G2X1CzOg1ijL3zVxw2WXwlNXwEMBTLCjXCCQyXwwzpLSeitcAWQQlsDaEK6FMBWyN1mUfgUFwGjPmu4BXe8Kb3bkCZu/mPT5+f4freQQK4eXrEw7Tgv3Y5DaBC17sBLkw3k8TrstQpSQlMzgU5BQQYsY4JhzGFcdxwTVZSk9hDCFjDBn3U8FuSLgsA66WtrMsUQGRQsDqXgIKhgDa4FbpDBngFVElBnS/+nAiDhlpCgixIDjb5TBjP6wVWIpccFpGZGPjEAmGkDFw0f92ymBhFjytOl/DY1AZTw5IL6Bsl9T8GrKox0MYCsZpRRgK8tNQm0K+E/zCm/c43Q149+4I+mhCPKlMCQDysQCxaHLOKEhTQb6Em9V9wKQDlirCKyE+OStK51uYAczG+khqduxzJU+aMFIGQbIGn3uphzfW5
WszuCwEfyhKt37rh/eAIYE6W7lcamufyjH/Nxes7V15eyPWN6mFi0ue0o57cKp3dse3qw656Oy3lFaW4/80HZ/xOvTLB5iy222GKLLbbY4nMWP7ImiIj8SeBvqurdJ2y7AP6Eqv6fP8nGfWrhb/WPqgD8syakWcSUBbGXZwmWGFeHDV2VmhhAoi2xB0yHwC0pgcVxQxRJoGRI8jjhqValHlqW8z06htuuysrGt2pFqFpiq+qJYwUx/KW7DCt9inWJRlUAFQNrgmQXXtWWj+iQyWOgOBhiFrY0TY9aBlKSJV+ShSCPc2yt1HoHk2QFADTApObBggnB1nauQaJqzfpmRqU0550KImnSxRHDwQNdaWuY+KUdJJyt/KFUYdqAbVu1KVWXkxniLI+S45KWztbSDR3jAgyJQrTzVFvXqgcDXpJQQR0HDsqwmhO9oIeZ2C9aGVr7IMscC26HmVLmXMRKV1wLgjkgo5iKZS21CLoSFWXRcq3gSzBXDGCx3qyuLCowBappSClCSpkuZmJQUiz0KZNd/2TOgdPYGcgx1bovkKiUVYkQ1elkBdQ0JxaFHAygCEGbVXAd8zrcRYUyJddayQYQgYEp2ebBlIVQ50d1XnEh0bIvVhYk3qbmROQnkWUex/vgop7ezylyeHLk9A5M951Z2bo+Cvjc7JQi5ZFdanMkCrQSOnNl8hKMWtbU6eLGUq+ZsAgN10h2bZViKqYNQHVL4no/Ju/LChejTu9JCHNEY6CcI1ptsf1+CknRkNEckMNsmjCYmGq8Dw1ELp1Cvzznqq4PagK+JSh5X5iuZTl/tHEShdgVytXE2edyicJ8EM6naLa2BdLJylZgJdIcq/hoR/BxnPb1OtgYd7cGRJXe7uPoJT1lsHKR+nw30WUelUOFUehcf2N4Vbj4duAeAx2n9638a384oyp0MTOX0MrDzufEdN/z6tUF19dH4sXE0/0J/SXbfn/zlOFjK2U83XX80V/+Ht/8Eye+fvEFLr9u49y/Vro7dcFnA8Kmtwr9O3ZXVn2d88nuu2rH3u0dqEmFq935k9YFtthiiy222GKL30P8Xkpwfr8lNN/9k7KV4fwO8eMIo/5l4J8D/sYnbPvDvj1+wrbPdqxXY1eMhbp6qskSyzDpotsh2hYnZZZF9LNG1ZCoSWTSZlfZdB9YFg7bCvKbx1mDEu6Osk5GCGo6EZ0noizHpoBMVpOPKEWSAQFNFNHbuOq7rhNfgClAVLKDLC0b8hX+rp/RIVAOQh4j+RzMOaM2sWhLoE27hMeWtFVfxBPsvFfXGPHtKwZHSzDXY7RepKzgh6zGrVrxCp7getJaE1jvo1agxN1TZM3I8VV3rW46otBBrrl6XFbdw9lWYqXQGAZh9hXvHmeKCHJeNTus9EPE+1SZHH6d2vYCoQJyFevK5vQgouYs4x2ep9hAChNALCY0moX5nNApIKdqgSMrtyJsPqXVOCa1m6A6+IABgs7SaOM31wvt7b6xpO94jkhvzjExFkQsAatJ3zQm8hjt3vDxt+S9PLonyWLWtc6CKr3rU9SYg2uCOrC1OkfbTXHHDMhp0QwxUMGAHM1CUUFiIQ4Lg6pEc/Epozm2aA7LxT/HNrekz4SkzLEjPIQmoivHyHSIXF8fuU+F+Zwo54Cs2FWIJfiahFke2/SqgxP1s6aD6fStMugi6uuAYQMB14Ogq38Li95Q0mZ73Z5JPtaNaebuRnIXmjtWPAl5Z9vzviAqVM1VHFAKPo4lKFkdKHNbbU2LkLBWcVkfMw2gl/OibwPWx2OCu455yIRdRi8N3ZhSZLq2+zAerW3pYWm/FNMFKT524xMBWTGvfHzi2YCE/q6gQZh3vi9wfqqMT2w/092o7mJ2HSQbIDw9eP9CYHhVmrbQw3ng9LSjXM32DBfQ84KYx7vA7iYQJrh5b6Bcznw8HPjy09cA/INf2tG/2pEeoP9+4jdfvss/8+43SL9W+AfyJQCu/35k93FxpxoDlMbbQHnL5trb17e8f2FKq7fjjptx4DwtrwPHc893v/l8eYZsscUWW2yxxRZbfI7ixwFBfjf4auBxSvoHJkQx21VgjSFUwc2agBaxldf19xR7qa7fe/NFGuzluvQg4qv752XVdW25qGkFhICVtcyhJcRhNOr62pa09H6caCKYNdmv25WyACHH2BLd2iEDJnw1P9bky1/kPRElm9BnW+l2lxowEKjfuXXmzlbxc20zWALqArIlBxiDTZIKMFRQxsgqZo+qNMFOmYVwXra3cpq8JDQVQNEKFoiaT28dw9rm9XXxZBoMpFi3gbIAF3YS2qp1c3GJ2uA+jcocxb6X5BGLo50vrKx2R1k5y1iyqcESM4IlsWtWUelXDjgr8KhGULdSTYHYYaVLKugYCHeptUGDJfE5KDIF4rwC/7zvTUA2CaVTiie2us82v4Isc3J9HcfFYaiViUVtZRacI6WL5CGa4KtiDIt6nGnFICm2il4SiIalfGzdznkBs7RbXVtYbGQVVBeWQ024AWf8WHKqK6ASZ1WELEaUGoDOXVGCkIaMBHPCMZZJadcpz6HNM4nKbj9yEiWHDh5ssoSTMN0MyOFMP8xWQhQUPa3mysrSt4g2xpYdoPZx1eE1KNhVJVT7LPTO3qpOQrOsWFGhHTNX95peXQRUF0HZCoY0xFagiInGYtcinoVUhUuLoYJlTtaXLGRlYbD5tdeizTKWwOI81AAbKw9TcbC59l2tDeFk9rzl7ICVi/RyoRDUMLpzRE6BdB8a0FPn+PKctXtMZmngaxgNnBxe4aVvVioz730Yr4sxX+ZAOIZmU04tLcxmo5wbYysQz3D40Lanh8B0IYxPe5vnBdKRdj+lowmtdveF44vAw/s9p5vER709a58/v+P22Y79h+bA89HfeZe//ccyf+j6I25+2Wx2Xrx6l+5WSCclzEo8wv5D4Ris3uV7OXCeExf9yGlO3B4Hzqee/GDPjHgTufpu4MM6N7fYYosttthii089fhICrutjbKyQJX6YO8xXgV9effRPicjlG7vtgT8HfPMn2rJPK4qt4FcHEqC9JKtIo6CXpK3UpLqDtMS8/n9F6bbjGPsjzC03WJKJNQgimAtEXNogIo/IF2E0QKVZdgqgRqHWVBba/traVQwIYVqcB5rlrJnoPqKhk5dEUbI8XknOnnAWaW2Yp+hlBZitaTeT80LrLkVsldNBkNyHRWsEW5FXByBQaUlE01aZbPBlEsR1CeJaa6WWCqzGFHlc1lSdEVqyK1YL33QQ5sqs0IWxkhdL2zYfapIOrSymbe4LFCEn1yVZJ661L3EBzIpbp9Y2V7cPjRBPRl+vIEtO2lxwzLnDrsW6ZCg+BEqxVXFcWyY8RPpX7hQxsZRmrcbrUdmRLlPHKP5CdrvMSRTdK7Ev1nbFWBDtQmi7T7Q6zMgyVw34UUrwjL0ITKsxKN5/UUhGMtHej1MBrOKr6t1yfIIu896P0/RcKoDgzKQK6rXV+iIw0wC9WiYlMw0kU4ES6g2pxE7Nwlgt8db1/dzaIJQxMnWRGAtlt7jwxNtIuIvc3O3N0rc5Na
0AgqrtIooMbuXbgM36MGDZfw2CiIEW9bgNEFpycWOkjc4wCVD2GdyOVpIuDxyxvqjSbI/rOVWBrpClMstCc4QyO2whZtDk99IUm/20AYgLyGfAEzy6aev9UyBkIT9Est8Q1b43nsydJY7CJJFy4d/tTPtGImiYKSkyJV3GWGiMuMo2Kbti17taNosxOfJA03OZDtJcdoztU0znqV0DXeyYO+tnFmPGjU8Kx1MguQvP7lVm9xLmF6HpGsXzArirl6XFUdl/VAhjYLoKvOyeAbD/4h3nd2cOH0TiCZ78FnxbvsDLX93zS8/MtO2DLz7l9Gqgf43ZBBcYXirJa9TOr/bcXOx4sddW4hdP0Hsb4+h2utu70hZbbLHFFlts8TmMH8YE+TPAfwKNoP9fsbxSw/IaPgP//k+jgT/1qGBEEz1YwAww8EOj06frS3DVrVhRyu3//qLv6oVSqqaIWN078Chh91ENvgjdBCjBVmN9/6rBUValF+BAzEkoKaK5PE4uqiVuciBkDZ7giUyo/2AF/PjvkUZ9b9/zl/3axnKKjLOVk8S+MOzGR0OraivGdVXZBCkVjZ6YRlupllWndJUMKSwgySwGWKxfyp1pUoGLtcBfjZJc6NEp4Yg6jd/3rQmX2LHWpTe23S+Hi5aaHskigitguitV26RX+319nVpSJ3YNa7lB24HWNtQYOs0mtFe7FqKomv2oRFo5U5hqQiiUXtza1OxVa8ITVuU39Vxr0d2q5fJohTxCqdapRGYVSpxMQ0PU8QYvh+l0mRu++k/Spfqis/5XzZYmfLnWdml2vqC9sxjywq5QT7Y1lTZ/iEroau0UlPMCWNT7q7Jdmu5G1eqJXt5W7wsHthY7WoFJUJZjzvW5sBagbeO6uo/GwFh6S4qFxu7QTgknIX88cL6eDKSYwyPtFXAgo4jXPq1AjHqeCtbWOV8HOrs9rYOKTfPl0fPJx6Pe2t1qDHGdl7Xlsu+3fiYKoF2xMr+oVsHVxFXF27CUCT5iPsUVm63O9TnYPeNtlFoWlQ1ADq+jgVOs2uH3ipW9QEWPdQhWRrXWQVmBjtIZo8meK2EFJGnTYSkoeQfnp9JKkBpADoSHQA6JViakfk95WZOksgB0avbdp7eV4NsPH0D3UOjus7XFjz31tn28FnIvxGuzUe6OJrCq/8h2vNVLuJopXUKKMrxW3vq7wu39M/7hbzjQcjlyfK8j94Huzqxw4xl2L21cdi8UKc5w2Vk5k8zWbzAA6OELXsa3xRZbbLHFFlt8LuJ3Ypb8PDJEfhgI8t8AfwV7Tfw/MKDj772xzxn4LVV98ZNu3KcRtuqtjwQIH5dfCKgiVScBmr7HulYf1rmKeu2+tmM9YpnUnWsiVZbz1YRG38CaSvIk7VHjLbkLIzAFc4VYATOlL0tyJ7qISPp2rZoLdfV81b7GdpBKsV9tL0uSpJONUT4rxykgcVXfPwdjczQRWJaVZizZk1hceFWXUpqasERFh+xJb2gipcsY2vi2PMbZOY+cVWY8udEKcYHSVmCl6JIQ+s+aBWJJl7YSGXE3n5rI2Wq7CcNqZGEq1DZUwcRaatJpY1Osr8WbJQ+PmSK6gAbibV2tWqejsZniqSaiqyTXj7EekzbPWZ9j6X89RvRzmCtGYKYj77Ilkjn8dhCwgkfRxHRJ87K9OBhWpOk+tHMlKzFpIFaA1Fm5SM2NS45oKl4WkU0Adg6tTCIEA0dqucRvY+LIGwBa9PnwBghYgSxG+6yyBRBPnOv9oj4XakLfl3YMyQJjRFOwzys42ik6Cek+MMWlXGQNfMLqmLVdbZz9mdMoPLWv9X70D103proN1ZKh9gxKSkG8pGjlZOWCtuLlbzJ76dbqnqokkfnKnx8YcJQreFQdjtSfO6pWAljniBozqAKYrSMNh1mVi4mCCPG4gJattGuwe1AdaE5HBzAmeQR4vnk/aS2hE2t3A1ViHWx7zs+yPCPq34d6f6UjhDma21cF9kSaU5H2Pk+iOksM5ifKg6NXJQb6W6G7q9fFyr+a5sgze96HGbobob81jZLhhe8fIne/YOU5Kvb8370uhH8gvBjMQWb+woheZ84ByiDoKztHnWu7V5nuLqNRmC4ip2cmVnu8tu3TlZKfT+jw8/dStMUWW2yxxRY/b/HDym5+kiDJ76nE53/5iZ2+xQ+zyP0G8A0AEfnTmDvM7U++GVtsscUWW2yxxRZbbLHFFltsscUWP934kYVRVfWvAojIrwD/NPAl4DvA31DVr/10mvcphItSrsUsJao7kxhSFbKgMyZUiC++rtgfddWxYWS+ivhIA6SuplYWRP18tZreShJ8/7Zi6au2j0ooAua8sGKsyLwiEGQrD8g7bdRwmYQ3dUjW5S1rir+oWI18XSGVumq9rFxLXlad8fKBaodb+xackt/avloB1qRoL8b4SMYuWFsES1CkN2p7CYr2gTI+Rg+rk4tGJQT790KdX8ZwvRJttpHa9mnCqN5mqWOB63eEOsbL2De3jABMzsaRRUOgMXq8nGopM9FmoWoNqGPlFr5RH1s1K00wtM2HsNgUl6Amgjr9/+y9XaxtWXYe9I0x19r7nFtV3VW2E0coWHZkFCs85IEEiHiJQHL4kSIQIUAcJIIgiWUJkUiRCYGA8/tCgEQogkgYWQjJJkgk4QFiOZY6RoADREbEgkjQthRQSLfddnfVvefsvdYcg4fxM8fc51Z1XVxVXXW9hlR1ztl7/cw511zrrvHNb3yfCTrGPrIA/Q3rRD8P95ek7peSn2SqYJyDNqD56nu7Auu7BBLG/oygJ82xymCMEitnADQfY27DEkjLzyBhNHcbEhfKZBa38Bx6GqHh2pp9BwCPD4Orr0pWhiAEqKZNsKbehjOQghnwMiekKKNZBco8sTRI3bWmMEGynAOAnpBzW5uCdhcydkYAYMwJOQP0gsFXhtw5E6swq/KAwfKoEd93StZKtjl3wjzfgXF/R8nWIgg75HBHsu28j1Hup0+PERaz2AkEHq5XYRVeBW+9fX0ZrlRQgC4uHuwlMwQd2kQTk83vHRksFtrtXN0vfdpv1+dsD3Him/EEoK2lCDQ42ECY2TdsOjxyb2yaYIxEH8JxRlZFP1F5XjgbZSVr+zNnTYk93Pe37RwPS8P184T1veJ01IDt8/b9/paziroxWvo9YX3PLHsBY4T0k4m9yskYJKSM03uCz33R2vCunHD95g55JrguBOoN/W48n0gaZLH5u98R+slcpq7v+L147+yseo8fccQRRxxxxBFHvCbxoUEQIroD8GcB/AuYrXA7Ef0QgO9T1ctLd/40h79s15d+/Op5EAAAIABJREFUEqM3EzsY4i4lkQSHW0e8/FIvgAIiwVSjUQeFPrK4zK7jXCXp32nk6ux08nQmgTk6xDkWeNtsexMkHN1qV7I8pwO6GMWeN7oBXayEZqoTqCDNTlb77xk37TQSthIpoHr7PdlLNHWysoXYJsQo2YREtamVDtyUMuii5vrACrrrCZpkeyUSNktWZLfxroKf5AlU2oCSJS9B81bXdgmxW95oUObhmhzed9POI
JAMEcMAI0K4ttL2AQMjZC0lCT4G6cxSE9ZalhNDGOKUpQSHIgmGfSYrTE1UKMVX+zPF9tbIYOg2saUxJnIqZVH+HW3mwAEAp1+0ZGl57tojoQGS1ynGWK1E4GS1ZWkRDRPQzXsmhsjnipAl4qEPI2D0rgl22Djb38vilrXhyuIgGxGstMoBIumYgA71Mib7w8aPmpVjWRtGf9pJDLDYQ00WVgqzkQFSCVCWcgu16xf6JurX0xoXBwb0pOiqQ/flJCneihAy3alopGjeL6YwTKN0ZPWyo+gjU4qKKsHmeL2u0c6G4WRz5TxeghmkdqxFsT+zdiQA8F6ppRKAmIa4KZDCpVGCBxBwkska3Keo73Az5/3+BAF6FuvymfI5yRcaQsdKQ++j9hHIkp4ERcozQYkgLKllQ91LkCpgEiKnfcmyp/5sqEuTXyvu43keFrh8tWfR3gBFt/IwVsCdjnpT9GeM/U0vPXIgK1x6Yk4QTNNI3lL7udjx735ecfoqcH0b6Sz18CsI+x1jdR2g+//XtKL2b9qh94rr2+biE7onyoTrpYFk6CXtd2Mc23uM9uVmouFHHHHEEUccccQv6/igEpYPUyrzUbjcfNTxKha5/y6A74EJpf4wgL8D4FsB/PMA/jCAFwD+1Y+6gR97eDJShe9yxXUPkUpPbp2FoMtgEkABFhMDVH9JVQG0++L9bi+YvCNX4ce5aWhYxDkyEY2XfQdE9niR98TRmSZZP68jIY+whNxAGFk9wS+MDNptm3AjCFtgwBMiArQP+9NkK8R53N5V6ipqMEOAoZFRE8XoLyxpyPOsgwGRYNOqww1nGSKTIbSqYn+rYohllvPT7roIXECaYCqUxBSrjqRVGRVN6m/aqixfOPVBpog+Rb/28jnMzYbEMAplmyuTVbGPe+Bb9RrkdYovfC5O+jU+F5VGP0HA/oZC33J7V4IxKqomjCIZQsvdjmUNcMG27ztjv9pkvawn8CNhfU5oV1gyNfA410awn7yYuKYumiyOTmrnqn3rZKKg8X0wJtjsU1Wa2SqX5hKsH9IZIoz+0ECPLTeQk4DOAloF7POihS6JEvrOBmyEPouO3JvXnm4qzDJ0TTxkZ9McufKAgG/Bw5geTU3UVMvf40jD/npR8CpjsT3aJgEmKniRvGdTSDVZRJIgTlzo4doTCX25YePY5R7CtYAaiuJ8ZABNu+8Tk2fjc25LGuBBAXMddICDx+F8VG2q9awQKvo/inxGqCBtkcndoniVZKv0x+ZIpo95gEUxJPE82DgBIdqKzkgAhTuZlbYza6iTsXkAA3GIxnPL/x1Qb09nAMwJQBvbByAdICVfAX4g07I5GdMt5gGfO3QVs6Cu92TV6qFybAD9PIBX3q3/1Rmrn43Job/g22yK5QVh/yYATSFv7dClAc62ksVEZQO0Nucp4PRV+355AE6/qGiPX//F5ogjjjjiiCOOOOKzFq8CgvxzAH5AVf9E+eyLAP44Wfb9+/BZBEEAZIlGvnhaUskwWr00RdvHaiI6wMXCli8AiGxFHk7jZmcoyPyuP5bPaZTLeAJdS3IA+1swVjLr9+wlDZP1LY18R1cFq++3IV92Q9wVqlbmo7Z6j8jhCpJCce5o9i2IFwlV2I8uOtnLppDnMpJzEgI5mBTMFFZP8Lz9HCBIiCwKoAsb48AdKXIooyQgrl/DsLpsaivVMQ6CTChjZTsTNBcrFXcJSpDkbAmgEKzEwdk6BcOw67QAVNgmtdyJ3YY3GEITk4MwWD/xcb0GUkAq36/mtdrhtHgD8uI7Ock4jLvygClBDlVkIkykaE3c7ljRO6OdJYGRS1PsLxZoW3D6KqWbTC6ch2WvEHB1jIAIe6wic7RvAGJ0LWwY+Pf3PeefCoZjUondAUG+MpYqBqvAfsfob0mKtwYrBAC4CXrnATB0AnYDNwCA73YfD4UIW0kO6ejkYtcpLVF1JNzjQvrECIvbEO4swsCaP11UdOdsY7iq2ICYgCw1gKPURK0kTjefLDclRqBxTVGubY1w3GG/h/pJRiLurj0JKKyCtnScz9sgKwnbtXlcoKKDhbLParu02fXlDvTGxiiL784D2PGLOgNKq0zXnbkAPWfrg42BDpZdsKSaDL1YBz11IUg4Um1wRg+s3MRBWr7QILEpzAp5tfFmJdCF0P35rueOnRT8yKBO7uCkUJ/vtNs9wc740gsgJ05wR8nayT4m8TwJwFfd5UcXQM6C5oyscGq5fg4uDqtmHS4GusgJuJguKlrcewqAFO1e0AF0P4fsxgzMf1fIJmc4Si0PcbFuH/pHHHHEEUccccQRIz6NLI8PE68CgpwB/LX3+e4nAXw2zfS0rKzXa8hGQQ47zw7N1URddIAO6m4HKPsHsBGlKp6oEuHpajxmVkB+xTff65wn0O4r8LGCGoyRyKEWBwE6BqXZGSFxTKl6IBif1/Mnw0ExaVvk9mJ2rEqwGaDzd9kmgiUKi45El9lWIIvVJznoEcNBSTM3MEoXKkwUpIUxXGvDrud8PSiv8VhNTQ0CBhRj3PTcMaEcgWg0hdyprXA73d+OY2AOhdsIYWJJ0O4WtluMa/TdvyfYirpgJK63Y+hASc2RM3EW026Rs42Dhs1uJ+hDCCnM1yy75r9sjw3buqZbj+xsjj2eWLalg94AugL7tqA9Em5yay97srnOVx+yxwCaTJdECiuIr5hAkH4H7MQ59nylXPGOPsQ85N3mHO9z+VG7I2y76ZbIvUAXxVZsS+XSrMwgtGw2yvYIa7rU9IvrJZCm+0yAR7GNKoGbQlLrwsZdL2xJOamXltQR91yTkMm7XnjWKZFgR6lp7KCZ1gkc0CCAFrX5og7i3Mz3Mp0hOkAYYp0YGYABHVkisVGyJ6JNfW/YWBOIWNYdIsY4CftqWhTq3xOrWSI/NtALcpCY0JcyCVlBJ7fLDregdGnRfEYGq2WnNvUt3YXimbGxufkAkJ2mexNQ62N2u4A1zgbRkwCXNuajP0PEnbG0KfjKyTrSN3ZgVQjE7rNVgVXQF59rncCPlNokNlcJPVgYMGCGTs5Y8nEIQA5xTReBLkBXL2Xx5u33iv6GlfMEmHj3ZUa7jOf7vngpXxlbPvcB9GwMubBrKBHopK73Mp5P/ZvJSmSOOOKII4444ogjXrN4FRDkxwB8t/+8je+GWeh+NuNlLAfAgJDVvz+hCGNq5sZVJDV1JyLB9e8ZMA2PsnpeTxmAQ7zA2rE8qeJh46ix4g6k1kVoVmRiFfuvir7YC7l6mYQ2DAFDIU/AZxyE6i+ZLY/Pqr1qrFzT5uyI7slf2YYi6Q+QoLAB9CxW595DeDCOO1b3Ve08AUZl0uvjnWNDlGOWYxxWtP5Zag1Uar4nnth8EJ3yntnCIyXbRBfNJK0ySlSdZe4J06TpcR3HCcq5LHMbptIYF4KcgCAX6Y0xAAqYJQMZ0RYMJBhrJcahgEb1mta5FNdWm1dyLJrn6M96Xjc5aR4nARm1lWfyki9jviCFWrUB7RGQE6X+A5W5HONIMkCQ9khol3HPaZuPbfuXmevgCHWzVO33hH4u893FLaME
IPVbvA/b0qCra2JsluBr0fygKkxMMDFPlPEkuL5MWNOSMSVYoX4QajIJ/6Kb7sqYr2NekDLQFdrVRFrhAFfVwBAyPZFbELWUS6XeRezv23S+nRze9isjRJi1M/ruwEJgFEWrhADoY4OSJLjCpw6CW8wGw6COU7FCViVjdPQhOqpifR9le4Duy+jb4iyR+tDqVq4GAHh0xlWVLnmjZ/u0GZDJ/sySRsBbO+TCs/6F651YKYuNR3Mb3n3lYT3uNsPZt7g0/u8F7cYapD7YGbQ36MqQs4zr0ClFpAM8728A9MYOxY6OJUVppQF6EtB9B+4IshPkaycsz0vzfUmiPWfIHRnJZxW0Oy97O3fI2YWoO+W83d/wNnYrBdS7cpMeccQRRxxxxBFHvCbxKiDIvwfgPyOiNwD8eQxNkN8O4B8H8DuJ6NfExqr6xY+yoR9nRLL9JBjQU3l5vg1HMvRMU4IxGBz2uWz20mziqvOLdn1hlyk5R75cKxTSRvIHIBPiCTgph9YpKS8rp5kwhfPCTUJxkyRPCTpuAQTbMEpmsiyojX1SHFZtxVFPpdSkBbBgZTkksfIfSUVpUwgzXmaHG+/KAArKO3uscEdbEgCprB+CJTy7U9vjY2f98CM7GKZWapNJaqARNIAwKiBLJm2AnH0sdjh7pZwfGIKlBHQX6R3AlyVfwbLI8YjckiyxJx0aMsmkCPZJ1aGJ/lYQJARkC5AmjXK//Zm7XfjTImRTAmBIjRdPeHc/ZnOZZN7UtGeu3kaewTSQbdu2sdrdrjaf2mbnkGWMlY03mQtHfK6WPLaL7dsvBDkh3TAMIBrjxy50m5exMeRMo6QrktwUHa0AhI6yImcP0KJAlEtNwrWjm6qFtQHkPMzkO/oVjjYylwxN8xd+HYuDTW7mzxW7L2gCb7It/PSaxz0qZwcqNkKyW4KkcLrRIQEMiPFxElYspw4+dcg9mXvKNsYDBCudCRDHS0aSvcUw0NLvNVKydiTAEB3DuKcXBXyuBdAQc9TGgvM5nmMUzyRvip4FeCz/HJIx/4QGILj4/cSPdry4z2ljB1HLCdTnXDOWFl/GtWQvyeHHwXDhHdP3MW/6iUEnge5i8yeik5VSLQK+E1zfFmOrxD29ASwmZNwvin4lyJ1A7u0hwqtgvTNUxZyaCLJxdkF2Mm2dG+bQEUccccQRRxxxxOsQrwKCfMF/fi+A31s+p5vvIxo+A5GlJr2UWRSuf4Ifa82uMZIZVujJV4pjN0+CXF7C3TIIQmw16cBIYFjHoWrCUxJdWYabCcqLcggPvrRfpaw+E/e9gDB1v5eAILcaJC8rhRkns92idD9zNNfwCHHZABZQGTOh5xHMgjprXK+jaivsj4W27gk9xOjiT1bEy8p49i0uSm1+J/CFwR3GRtCSkOwAyGv7vRyn2kZSd9FEtTIfY/8MACH0QpTUCsowwAP7YxYo1FUhbbB/TFsBYLilrE7Tc5TdbEB6C5EnVcGiUE96HTgze9JyDNdLIW+vLmM1GrDEsp8CVPAElAdwGI47IF+BdoAlVs6XBxNUDfDFxDIHsAHYfOYLBvMitE5S88buqRBhNUtPmAW0X9vlOWF5tGM1L5cJkNDABcxJZxkHbQS9mmCkgYcORgSQFoCd6wUhCA1x3VRHGUcwjoJJkiUz5QZaxEoqSEH7km0ajAn/WYFIIbAUgMWv5fvdlrl/ussUwA0FUCr3ozYMkDCcl65jPqvA7oHQ0rG6OmOQwEChrbNpc5yscfrY5lvO2StRlpRARoYmo0qhE7CZNWdNxhitanbDsPs3t/c5zxdGys/GfRmlVRthf1js+R6i0J2MUeUEEgGsHM7nEu+uH8IEkKI98s3zdgAgugpwFvSV8/nXHhh8NQevei0iYl62B4KsC/TN3UpjfC7G8wphs/xsh35+w/V6wvLct3FAkMRYVf2RICdCv7dB688MEOGTaR6pwsCt6ENv0IdllOgcccQRRxxxxBFHvEbxKiDI7/rYWnHEEUccccQRRxxxxBFHHHHEEUcc8THHhwZBVPWHPs6GfCODhIB9OEloC4bFWGXGTmOpr6w02k9ngdSVXsLk1KC7OSTU1TztOkoKlNBbXfUdv+uqtqLICrjAoDQGPxYmApCMCwDQTsEsT1YKAeYAUdquuFmFrOUwcX4uXdOh3UF+nKppUFd8lYPZMPqU4w0Ae7BBRrsnsRRneaRtLKmtiHIZZz9fX9jLWkpJTKyyRslCsApAY9VXh4uFrfw6Tb469ahT1F2EtB5r8McJ7IyK1PaIr9z+N1e7qzYHuY5HUgtgrIOqaXB2xtAJqQ0SFs2kABbKcpCw3412xHWQtcgklOt6G7zZtsEOAZBaBFYeRUNgNrVIbOVbVp3ET/dn9n1/bi4uLcQrV2NwJLuAgOUFTWyjKDHiYFfEPG5WMiQnW80O21IbpwaJc4WeSAx5sTPNsSrXeXk0Bk9oo8hqrJaYu7rY3xrlZ7dVAjEWqw7b1mDzuMCnqk770Sqgk6LHvo9sh1mK/ohilF51F2cuJTXTPef3TtUrwjLmUsyd2u+pRGZRL3uamWO8W7ma9YGN9STBBMFgZAHgq+uprAS66+BFTAtmGw+cLOFRWIlZK2M2PUP9nOCZzRXbhSYMa2pXdHb2VI/7jv16xzwaFyDYG+05o7+pORbmHIMcA3amkzjriB8Lo4acqRXPMjj7KMfFn1/nboKx3sZ4CNhxjHWUc6uw8JYHwk7NynXK0PBGprPz0LAJwJ/bsL+zQ5tN2PZAJkwrXlq2A/qCoO95G+4Y+7Nm91B1yIqxuTQTd72Wkx5xxBFHHHHEEUe8JvEqTBAAABExgF8H4JsB/M+q+vzr7PLpjkiUq1YH1F7O4+U5LGpLMq0hEtkUuo+EOo8QbiVNh01lUyCEGney0Q8BVFGEhWkcSyMvKCUI0WBVK82wdgEmGjqS+6z5d4o7eRVI1dMgAdAwCW5meMKZmgGRSIWgJBzMiPORJYpTeU4VIyS1gziwAfh4Usk8so3lIC4Aa8BADA7mn0295Mio+ll249kgx+8YyUtcq6qNoFGiX/CuFKHUm75h/p4QJRaWSIuMtjJcCyOOcwMWZUlWWF2GS0oMwUmL3odrVkSi2v0AajT99mjHqMCVrg4cLJgEaIf+xwBU+h25q9DYbjjbeHtdR4ZzLgLdy1v0NNAzdXHG7cTYr5T0f21AvytJnQK68GiXDgBtSnodfNHV761VQA6CECk6A7Iy+MJYHtyd5qbcIHNgvx75+Y4sBwIDsgGyjTIQWYEuhF5roSYv5AGAkFu86m7bvAzQ0E4GVJ4F7S0bYLnjcf/n/BvJue6Mvr0EjI1Nw40pQLYADyO57bZv2NfmXMzr5duSmr6JjO3y+ahq5S8uRGwWs0MvBmGBLQboPJHVLBpEGs/DRdMBBwpzhZEyvtUWye8T3csgtdFHvetWrtPJSlgarD3Tc8lKDNmPyRtBrvwEf0ntGglQxdooK80gY/NHmvcrS8bIx/zKBvJEG09iFrsXLwsSK7UJC13A2swXuyfaA1tF1Q3wRjthuQKghu0k4LsdPdp
wYshqjjHtwfRv2hUgt75tD1Y+tj9rkJOinwA5j5qeEDqmJxfwiCOOOOKII4444rMfrwSCENH3Afi3AXwL7JXsNwL460T0FwD8uKr+mY++iR9zlCQ334HdGYJ2E5YjgQnNbeMtWRezxzSnj1mQMzQFdFXoIlBxQcRbkbkbccjKiNAmWS+fUQCEbIN/FkSJQtnwxEUntkWu/t8kBYhdy+9V7DOSDwNBYoXbDhQgS28OHtVk4srOYnGnG5S3eWdg2PnGTqGNUh1EpBFC6DX7EO4MbThgYBoD+EpzfIEniUSAGxoMBsW8XfRdZw2LiT2TuQNZkqiYr61vkyvgXJoR18VX09mdMjLnc20BLLD6/0ziS2Lr10l2Rj8Rlhc09VNXJGgXyf7NEGQbJUCFwgSRjon9NLFk/AAk3i+xTpEMoEZXga6A3JGBew5gpFaGEPa1v5whUACECfzqBOwMLc44WAR6D/RVoQsbc8YBnHalwQwhOCBUgJ0qOKyzpgrgAqsKR83KwIXdMwFCYhbOkcTvzk6aFIt9vJwxIaxYzkZTSftZcXhJCcySjDJdCXpHCaqKcAKlgIEHBKTGDrFZ/FIFagDIxhBvIxWtoxDBlJ3NmYaNHTczryjHRsmecf1+6HPoSYCrASh4dPeRpKWUeyCuI2BWwZWWtI1xmwRpI7qPKWEIUhdWHoJRxApFTMxxTm0KuRtgnnq7EvBqJkKsi+Y8AQo42wKUtGeGLi7uGs+yszqIY8elR7Z5kUwwNR2TpY+53HRcC/L522xseMMkcKvuWtbvXG/ngbA/X0xL6Nxzm/0k6BcG3zOWFwR5RLKxQvOpPRqYQjvQ+3AqSk2flwG/RxxxxBFHHHHEEZ/x+NAgCBH9KwD+NIAfBPCjAP6L8vVPAPinAXz2QBDUlXj7O16KabPVZOq+ilZYFBJOJKcCLMTxAGgPtjZDY3mOSwIQSZ2+z4smKbAiV2Ute6kUBXtJB1nSx0FbLskvw8X5nMmhrLPbRHXq0NjOX7TDCcW/o07ANi18z+Uy/LIvbPx055E03JQMxXjWldX8OhIJAajBKOpKZZV8gCe6aPY9V61ZIdG/SJZkHqd0con23a6uu8MPgCkZm7CUFDV9+mUyLySSGcwJfTA2SJHU+uJaoitlOc20ul9rWwgu3GmU+23hqSwoy05WHeVQGCUiyczRUQqT9sIY98dIZJ9O2UhuW2cEKyVKdmTVBJk0FFh1nNcOgOEGgjKfKrgFb0NYyxbmU66kh0PPSbEzwOFos8AATU9u01nI28BXuxZyslNwt8+yFCjLSMr9WicBAbsweulTMIOGw8zYj13UuFNqHWM57XlY7eQAD4OcHkIsWBZBawIihSphL8KVfW+2j4MORApiGWSrGCuv91AltKXn970TRBgkXr51MtHmCfQS2PgHmNYJypzCpCYULZa8X8kYb4WVlHMp5sDmoF+Cs3PJjy4Kua+IY9lXYf+CKZklEWAASDxT4xi3zl4uMtrV5kRYh8d2wgTdDLRQv/cqUzBtsBdnIok5q6A8d4w9QlnaQp0g8f3i5Y2LDttkpXQaChBHFwEWoK9kDjS1xG4Rc71hY220C2F/aMPSlgE6dWgzkEru2OZ/ANhS5rSD6FyYH1aiNp4dRxxxxBFHHHHEEa9TvAoT5PcD+FOq+v1EdPtq9H8A+AMfXbM+2eDLTG9OC8zdVsoiIYoI7QIB0HzFW2Z3RVvVz5IWSlDh1vKysgaqtaoyDRp4UMOBWY8kSkSaojeGLGMlD4q0JAXZyqFphnhC1d3+0ZkGpOXl3tumQaX31XtWX70O22Dvcz9hZlHkQDmL5hrJiDMcYsU53DoqCKMoThGjz3IuLhElIY4XeXjCUQGpSF5mW1xPkmOca4LUCjoRuXe1Pq6rzTlOfo3U+mc1JSVhymQoQA6aAASNa+DghjrbZuimGHuGGNCwGq0r82U+DDaEQMN5I9ot3r9l7Cs8QI1oq62e09THYR+tAxDTgXVVrQn4ijhvlNavphViIEw6FUlp321iC1i5BWM4rgS7Qa3cwsp+KEETWRR9Y9djGCvuko48SIecTOwIULc04lNxQGmWiC/vDR0T3uwZsNyAZBk+15aH2t4AXeK4ZfM9AJwG8Wu9nVom0HHP9wp4LYK+GjOkNbMvVR3MECK1Y6WDDAHCYxo4cKL+ewAkY3/K3bAY+mjMljrfws7W3JRShyfcYU5W9hEaSdzj3ij3mZJfIn/uyrhk0mg8wwiQeNAWkERbASVoHBOAgXwxnyY74jEn0Bx8ACDMo1wvwKoV0DUoQmqgxQlm1VvGhxbB6iyevrZRvigO/IKTURjaTfY9nMGlAFNaLdPFx7A+hxcHDUXnMWSFtm5aSGz78YUhdZxc44bWDl3EtGdyoH2e7O6M9UizphRhYkodccQRRxxxxBFHvE7xKiDIdwD4y+/z3XMAb//Sm/MNCLEVL5KRpNDiJR67M0B6WTWHV6mQv5fvnpzejfdL9cRCO/LFWVY7fpR65Mo+kC/t1cLTeBziidRIrHKlPLNQAi2W9CrLYC2oiROGFW+szgZzYrBEdFoZfxKhQ8KAqpHLxbUeol1yUsABHmMgPAV6wnKVgKTXE6vby3obyf9OHQZN9oieZZQWTOKk5PT4gUfUWv2JdSHj76EvoiMBz4SMJkAnxl0dnJiSlGBM1GSsJhsOcqABfQmtjacAgHqbbHfK1f/KHOKNsn+pT6Nl9ZZd0+Pe9QcyeVajGzQdK+aEFGLMshWyhJCS6l8SphwM+Go7BoUhSq8kVph9jnkpCu9hP2vXfhLmhJemxLXDfB/pUu4Xv1bBcqnHaQHMuFaEMnIlH7AyH2006frIgtTokZNv6zoVfSf0UzPrZZhwa3vEAHteErVEShZKEVYtZXS1GkyZsEDRXTgVzDOjKgYk2VnN+gSbSzgJaBnlMkRwrRGzqLXnEKw8B84AUdMdkqbgZgKnWS4TYAgbmEasoLNCu4FwwADE+n1DvxgbZ9KZ2SjLRGQFeHfgt/Ql2FBx/bQAqPZ8QN43o5QvQAz7XYHJYnca3+wLzRPL9yfyPjpTwlAhHePIxtTQndPunE8dygUYIYCbgh2QqiVH0hm9aeqE6ELO9iht9HIpFZ9ze9EyKowSrQrWNeI5dhJ0xWB4BCOw+Tn9tDZPBM1RvOhr74z+sECX5tcyOkHY3sLTEs4jjjjiiCOOOOKI1yBeBQT5OQDf/j7f/VoA/88vuTXfgCAF1vfgq5X2mYaOhBaKcElw4rMOT/RuAAQTCHXHkXgHV1/1CxCklG/EahtjaEGQAOLWFMlSKe4Z1ClfkHXzcpm6akzuohIJiC3eZjIhoFHeUWj6owTC2ymaLAp1Gvnk8iLWrmSxKCZHgUzayznSdWTzxBnGtska/XjvX1wPg8tYkYLaAIXieoXLghY3DER5CwPoM4iQ7QMyKSG18aIyDlDYCrkAEDawrJwjV2cjWVNYErPodJIQydVFBzskIkCMKG9qmBLqCWQBEgCIfakbrd+AEsYuZNoeMQ5RghMiv7XEJ9rOmsKMCjXNjgSPyrbdkmgqcxVlim
Xy2gCOOovQH+jl75K80jqOk33sSLFa+8BDTes2AAAgAElEQVRcdHRxvGqx84TmB/tKexzXwDUynRoAWBxkctHX0GHQoHYsAFYFr93m6hnoq0Au1oB+ZgdCSplRwbqmZ0UAcjB9iejXEBQu9/x1JMehsRPlIylsXHR8lK0URRZz+JB7sdIKIEViobAEGDCtjZxLBJR7rC9eKuP3ETuDQ11AVQFjnJw7lkieScHe6cfLiv3aIA9Lshji/Gg2n2ShAbz5daS8PgDWUYIFeH9j7gVAsRTQkTT7gHof0bgm1kEabCa92R82PtQUxN00UMbOACl4UdN46W0uc4GDJF7esm3Nx7kwcljRkl5l30EKo0ZhuindmYKKqWxq0k+Rm+dR9nX8O5JAjg4AUtWYiHISYDPBXS2AGDUxsFMMINEzGeBYnjsPBwhyxBFHHHHEEUe8pvEqZNf/GsAfJqJfUz5TIvoWAL8PwF/4SFt2xBFHHHHEEUccccQRRxxxxBFHHPERxqswQf5NAP8wgL8B4Cdha09/BsB3AfgSgD/ykbfuEwgSo2zbCr5/qLECW1YZq+aFr2Szr9aSmNRCrNh2GOWaN6RzCEWdvh9DVhpyEkHJ9/3snJTWtxEpchptcqFQxCpsaF8AxZbWN7+toXc9EQrNEcLs3JD0dJ60SqpuhkKTDZJuDW1sGvoLsaKfe8ax91g9p1z5D2tKwMpuaHcrzn20f1iODr0PjeOyTuyM6DeJW4M6EyQdbN21JvVFnFEylTz4UAaNP3QV4jql8GpuP6yOky2gsPITr9uZ9CGKWKv1R8fxQ88DGGU6e1k1Fl+5FzWBRLeF7fc8mE3FYjTKuqobj56KxWlcpKmkyfePsYnSk6pHstgKfZaAlHmdbJWN8n65jdgvtocaOyjdT4jcitTaKmdfdQ+9jR1JAKj6IqnBE21kWAlbdC9+CT0IsdV5XhTt3CGpX8O43rEJsvr9koK+fg3TacN1QJRHOwFz2zCRTIwyj5hX8OfFTR+MlTbmBylSOLddCVsnyB2NrkTf3Zlm6IPEuSjZUboHM8ra2EO4uDwHtjvC6dmWLIe1dZyWjoUF53XHZVvwvJ3RF78QG41SrObz+Kb8K/RC4HOdVkmXHwBgZ50wm8YJEWa9DXW2DCt44+kyks+lYKOMEpTyzHAWCZozTnxuJ3tGyJkSXibYyRhB+QwUaDCEqEFOzq5IpogzZrL9fs/ng42yrI9AyUbTU7RR8z6nEOBWgPaiGeJdEmc0pQ5KlApGv2Msgi2YpZUhkAtj2rDaz3hengX3bz1mn4444ogjjjjiiCNep/jQIIiq/jwR/QYA/xqA3wLg//L9/0MA/76qfu3jaeLHHArsd4VKD4A3BXdAoNDmNrhV+HSP8g1Pdm6SOvYkJxIeIgxbySz18Bd3LvoBBfSIxDpABAqa+kT7jgS//B3v2YtmEkoKd2koIEl5uY0ylEm0NWIHKBxYmgsERuLXvbynJGy32g60w8st4mTzONJu9H7oU+Y17QRe7LghPGsONiPps3KZAR48qZ0HUuvDdvJEsmizpHtGJH91GAL/CUo63Zwi5828b8yJzDGDwt/mUhTSQXlPa9x6nVyPxRL8RKaKpol/tzB08TEVGOATZQhKmetFGysYJRifZz95JK66hq6DHyvAwjIXdQXIkaU6zwEUoVc1oUxgAvRCY0V9LIf96bgnlTC0SAiZvIcjj7JOJRUJZkVS92jAo2lSBIh3I1a5E9TrtvoioEUTzCJ385CTjIQzB8+ub7/wAEGihOw05qSsrplTwJ52nQGCOodu5Sy80fk84AuwgtA9Od6FLJF1XRbqNg9CF8XmKr2vjkYCdqG3IlYGdO2Efh9lQRu23nC37iBSnNcd+5mxeaf2ZaCd5Dobw+kGDgj4M9Htf8lLZACfA9F/F3oVIQhafp+DQzQBfNZAv+akqXkzgXpRMiiwUpeY7zJKFbHB9D/uSz3jlfM6KrOV/MV5HxkgJGAGBrCaXktsowFMeRvh5X82b90+91yFcsjAr429pNGut3VhlKLx5mK3AQYv41qDAZzEgCkvHeLHG/In69DDqc+lxYR3X/IvwhFHHHHEEUccccRnPl6FCQJVfRfAH/X/XotQBq5vIy0DAUuQ+QpPLs39JEEL2EosO8DRzwOEqGSAqOePVbx2VVvBjdV5gjvA+PHDFnECQShFSJUALslLiGiCrL3SdBIkZYbZqzYAZC+zuhA07TbjQBhJeAFzIgHNVX8ASgSRYi8azAnyBF48mc3kFikqK+tIbsYgjf5kMl9BEgHoChfH1DEu9fr5KrkslCiKVBSmMjyquGPBENIiNlb4b9sY7VriutXv/Rw3iWwm334u8pVdZdO2uF2dj+QzdB8ymbllaDBmxxrXGpGm0JMJVfLVk6vShgDs6vWJ+T45V/D8OQDI7ralZUy0KSTYLFyue8zrmsGzgznuPoM230+Z9Mv4GcdKIIeR8z30NzTVicf9kyCJC/o216cht3XV4qxj9+wAURJ4UWN+hJMIYMklncwS1RJ3P0aMW3c9GkGKaWbffJsemguEZCaBMVx0HAjVBQWMugFC2L6njuFe5X1sF4K41hCHeOwOyGnc66kv4WN7y3iq18KYXITLtmJ/wy72w/0CNMWLU8eydpxOu9nqhq7IIikCSmz6IcYC8+9Jwc1sfm38yIVhNf+WBCxd0FWHvawGwwUwxtTJGx73s+u98Eb2HKjCx9aAocfUMYAPpSF+rXbhetU52ov+yJP7ezCgAL83TgSNOcSY2DVjjurQU1oE1OYx1IWmfycorF903B/TM1MHCBICzibyan1VFJAk2lH1oKTcT0J4/rW7iaFzxBFHHHHEEUcc8brEB4IgRPTjr3AsVdV/5JfYnk88tAHXd2SINcJeIPnqyQUHq2K8wwbDgTeCrJr5XqxyB4CCpSQ2kRjGMQQgt+JtF5rLXFCcYvyFlzw5zQSWgjXgQAMoyzWsYwTdADrpAAoUw+WhJvKRHBUQhchAlVwZDp1MHVaiVrpS+rSP8QJM4JOvnlwEwFMSLUR5g48N1eTR25WJYBt/xzgDAK7WNlndtYZGEp+IhmJYFEfJhtycA5jsTKd2yABwsvRlyg1GpwL0yoRKCh5QEpo6l+r5AxjSBCDKidgFWUspTa52s0LvugFfd5Zsk5cK8A5bWV5Gog8qoFkFagrQkIYc1wLyeFIuCxIgqAm/NgcIFFlzlMDeAkiwiooj0cyUMQZWCt36KWTVSSQ05lpuwKXNFYzsY3tjWhV2VZkrCUxFmzZYqUKWZrn97CI5ByqoZ04cltROiWW9fu4SFGUi2kqHSnulVF5U+9gUS13s8/ZIaBekfXd7tH7kc8zvXb1iDr/O7M5XJOM6KtMQGhb7jnbC/twm3f4mQxogpwXXs2C73+2eK4KjybwgsedfND4bZWDHvjOkN/TrDbJZngPUTEg4xzGAtLhWp3qDwRk9DPGLECy3APECJKpiyfWZafs4EPbYoCfxY9AMDhYAIURwa9kaK6AbeamZDoFm4EnJHgDQOhxmyLchArrYjnTu6P7Pd
ThpTcwhHceOSwzSIca62ENMJqtiuHg0zUwZALox6LHN9shHHHHEEUccccQRr0l8PSZIVcIAzAXmVwH4WQB/B8C3Avh2AH8bwN/86Jv38YcugDzrTzUQOqE98ChRIJ3yFToBso8XYCUYEwL2B8dqXfPqDfKylwAQis1r6JJUnQYuYIGs9nI/rdr61ze5xUiAOtw6lkbSJzrrnCAXPUciHuNC/qJfzkudzFo3M3jksdEUHFT7OMZZIQzTa6CRxI2s2MGN0BCQW3BhTgCjf7X8iPsoNSJ1RkxZGY12CgBEgsg3Y1nKe5JWXhKcBCfgNwTRPBdijCSSSpqcQAC/hhydHsebSktqUhOlWdEVgpdvhA6LJ327J/VerpG1/TL60E+YzjmufQyutVnhoJb6WPjTgQJESTDDzvGkrCcOF3bJ0cbA/wize08cTzHpbOQCvii6t2GwTmJA5p85ju+3ch2gTyBxPt4TGBYAVOTZFXQUAnW2kpoGKN3YSkdSfKMNU0vM0lqZXAejEeQsab0ac00WJBuGvWQsxksFo7yFfK7F0GyuK9JtvmUfi9ZRdVw1qYrRXg3dldWHSAwkWF4MJlC7EvrJAMd+MptcLDon167FEaUt1GTobTSGeHnWfmnAxgbW1ZLCeBa3UtJXWBi6YOiJLJIaIrn7xlBuNmfFx684Vhl7QkHKT+YmYO3mzV144mFB5dnpYJuUckOJkrEyBXgnSLCjSM1xCQbsTLV/3t9wqdGXPQdZgWeGrGq4G9XnXGj21Htcvd8BFjUFR8lN3AZsQKte23wNQkPkYIIcccQRRxxxxBGvYXwgCKKqvzl+J6J/EsCfBvAPqupfK5//AwB+xL/77EVm+GM1EQSv/zcLTurzSmCsYutJp1XyTBLbYF+ov1sGsyNBEF9p5R1ol/KS7xH0aupwob2Rv2XTxUCAWJ2vlOxI3BkGQijDyzFu+49JaDT2l7rKr6UtQtBcPVfTC6nJn4y+CAmwYrAV1MGcYEl0Srp4FdS8TdYrU8Go9iP50LCZjHMTzYmtjw2Tmr6J2LVIpga8TzTG+vbcoTFgg6wziOLJYuzPGw3g5GacqyDqVAoSSVhAjgV6vNXNiM9TEmQn0zDY2bRrPNmJJHEKKf0IbYS4LgGgFGAiEldslAl3tos0S0nqNUqR3LJtLbUyHRedjlXndOqDxOfRnlJaFgfSkniPcqTSP2Dch5U1gnH9ntwPMQTR9jjuBrBaCYsBOTSAK2Ba6a/DwkWElcj20ZVG/xjFmtiuZ+hcKCuk0cQ4qba5UcITTJCwzw1mVGyXgKuW+URAv5sbG9bgsmqCJ3whnN4l8OYD8kAI0Cz0Z7SN8i1ZFanZozC9ilOZN2IgpSqAS/PyFczlVjmkLopKOgQ/nWmiGGPPNK4rN4GQojuDRLv9Nz1eya+VWznrDQgCAHoi0NXFq2HzKb/39gcYpbFv3NsKK8mBDj2isPqG/YySKhVAhF3I1sct5lKwPYQgWwN5CREvkkyXbID4uJdSIezFkh1emhNWx6HVIuU+93MB9szTZ326N4844ogjjjjiiCNel3gVTZA/CuDfqgAIAKjqTxLRvwPgjwH4ix9h2z6xIBe9m1ZtYxX2xtkAwBCM5JHQcVk1Vtc8oA0j0WkGLKQrCHkCcb0pz4ikjxzgCG2R26QMsVKLTJqzhKZu56ux2bbCcIjzhNAel3IabTQDKwFC1LyXjZmQDhaxehwOJBe2hHEdb9LTeJbzJYMFt+eIhMxLI1izTt0WKtX0RoSejNHE1vD6/wHmjPGJFdEoaZLC+plKfoKu7mBOHtevcTIfGAP48U71s/UjVo+n6xBj7loQ2spA16Q+QBCldHYIYIo6gcXLTWQGCOrYm04LPQHUUsgRmM8HQJtpPujuSFyIT9aV4+wPDfZQHTa5OebAKXIjZQfpuI75GEPtQ0TXSgrKMaZV8MHSkDL36pBG+3OqvYRpVe9LUhiziiwpvtWGmRxvbsHKPKD/2AjSkYySIRqsBaTQkZhXkKj+fgL2pqCznaHfIRlI9ryxucSpezEao6xDlyU6HvoU/hntpjGjjbE+j+QYec/wDtDDjXB0AAqEHCCdUMMy92qJVS3xigGrIJ7W85OBq6QGEEg5vs+zZe05XiqM7iwM3TnB19SUCVeXMv9VTCR3EotOFk0AbDFWNl6T45P6MzGYKApoONlsPs5RinJl0JWnchttg+FFSsBDc20kAGcZgxTMuvgZQ5Z6JsFoAUAECTulRQx4iXsT49xx3PV+e3pTHHHEEUccccQRR7wGcbsO+kHx9wD48vt89yUA3/lLb84RRxxxxBFHHHHEEUccccQRRxxxxMcTr8IE+RkAvwfAf/OS734PTCfk6wYR/WoA3w/gNwD49QDuAXyHqv5s2ebb/Xwvi3dU9RfLtncwlsrvBPA2gJ8C8P2q+lc/THsgZO4MzuYAjMGR1Q/LYHxUpkesSMvJNuRtZiDIomh9Ls0wtoLmMcz6dQiDpuJ/9O2GvRBMi7FB2T6+q0wQQq6QZylB2T6OES4RVEpEokQldDyivKaKU+oCSCfvr41FLVloDwRZCaLi5RaY2QMY7ImpNKKsqGLxsafBEgmWg7J9b6wD0y0gMRq9jXFhoFRtk2JFXEtRtJQAVfa8rt68YDtUAdq6nQuXTs4n8Gu9qJcO+HdVwNDnRmoSsM4LsKpj3Jr9HQwHcheQWVMByZ6J/mRZjzOeqPRQWaHgwTIIccul0iBu+kr65BgxluSlHPG9lRuUPmmsxNe/fcPC/Jjm90tiZpK8jF0yszVS7yXYQB1ZCsBS9o//bmhJpqmDIZRb2R43rJgsnSt9iHtDBeDOydRIhkGwPoRcPNXH44YdEja3pjOBZFrJ/bjmGtowrOmsMgbC23jqbmPr7SuipQDQrwwVwhVLshD4cvNM24HWMdxbQgMDSLaU7jSXmjSdNGSy7zmYSL0Zcl2VYJfRZnpL0k2vQxnoiiwZEmZwUzALuAmYAVp2nM52vOtlNUthBbAQqImxwMoxVBTUrMJEmV/Oerqdn1VTKhg/fhuFbbI4VSQZdWFnvNFUDkRq10BVIV5aRBsBe9gE0yh3y1LNuY3hBBXiUvGckGDnLDRb9AZzJMr4GO6ug9c6PnXvI0ccccQRRxxxxCcSrwKC/ACA/5yI/gaA/xJDGPW3AfguAN/zIY/znQB+O4D/BcBPAPjuD9j2TwL4SzefvXvz938C4J8A8AcAfBHA9wH4y0T0m1T1p75uazzhIZmZxOH80hdA7sWEJ4uif3900dSzoK8KfeR8kVVWULMsia/DfraKgmpzAIWMsq/LqMXPdsGSDHY7TOpDcNPOA9A6ErepBCGAD4zkLsQ7AaSOBcjseyMpzIgXfG837wZ0RJIPmNAr7TYe0oCuXtoTSd8V6F1BwpCTlcU8SSii7f5rutB4iCMSIZgZ/bYx1CnR1cXH6WZWRymDmCLisGD178QT0XSgqfkZ+XWK0qdOaI8VqRrjqDwAD1nG8XW15DadFkpZSNri0jh3tTpOgdSNvMzmJjFeFapDn2Oyz42oFrzdtW1K+RBA
kO46NgU80YVznO3aFHCpAlo1IayCoVWQU1G0B3SUEvgGqpbwopT3BGhRQ+u5MOYE/NppDG/M/VqiIJTJtUZyON0wSPvkACwmPQ0ioCHdVHK+YPysLkax3+21JoE9F5pC13GxpLmtqes0EM3ggbnVaCauqY8Rz6VVRlkKAPISEHhpFrGmfS2UwIuaCxTPyKQK58BSU8gbHVeygWyPwPKC8nnB9Rp7K1MfpFhPyzL6WbU1AL+3K/BJahberi+UDlNAAiLaCHtXdLXfh9iuoi8KaQxqYoBIE5xOdoBl7Q7Q2MUm194gDCci7aafQatYOY2XmE33Vb3Pojzs5tltujtjTnNx6SEyTY4szXoyHkVAuIKvALCRldsFSOf39hNdqUWHPpXMbdBentXxXwWlmqK/eJXXg89sfLreR4444ogjjjjiiE8kPvRbjqr+MBH9HAwM+YOwV+sNwP8E4Leo6l/5kIf6q6r6rQBARP8yPvil44uq+j++35dE9OsB/A4A/5Kq/qf+2RcA/DSAPwLgt37d1jAgd4rqTCJne1mUk0Le7GjPdqynHVyShcf1BLk28MneTGVtqe5vL6MKWQktEvNghdSkSUcb1J0QJi0Htpd+6SPpqElZ6h24aCsXgUHaZ/HPWP3mCnR4O5QIHKvwMwIxHSPOm4n5rplssieRUrQwjKEwHE3E/56OBYzkM2vu5/OpwFZrOZZQSxQGhzZ3aAjh1D4SKXLWCFQngUgDQTD0ByI/S30PTdHDYQf8/iCInDx5CVZHRyatprPgG5fERpuPadj3lmtErl9AG9kx6YZlUZP6aFaKgpYxBE32yHUuktoKP5gGG0KQaFbVpAGA/a0+J9/1YjESgahuGt7YMe+LFoMt7ZMxH5hMwHa/uc6AsSNiFf7mUgjHtbdrzukGUpblgQQiguWEMia32jyp9wK/h31qKblg5g1oGAyfycq3sHRy3vn4spABc9601mHuMwXEkQKihH5HCM8m2ytYJyF+Cdh4xnjHZ0txdtoYfeM5mY/xjPs0GAKsKd7amUDC4CuhFV2ceDSSOPihSOHU6q4EeB9vLq8UkdG8P+KZdS32tDK0i2gLxx4UFpX3kxVKDbIYeN27XRAmF0z1NqoCEBu3BCm7gV28iB2ravTEMHEZS2nl2WBgVcxRdcbTpP1TQcSX6Pfcgn/adP6XOo5VwRPG7CAm5M+WABzJGC59AFP9HGOu457KZwhAj21+GL+e8el6HzniiCOOOOKIIz6ReKWlHlX9MQA/RkQM4FsA/JxqNSz9UMd4pe2/TvxWGBDzI+X4OxH9MIB/nYjOqnr5wCM0AX3+aiuvUaKghH1n8Knj2f0V53XH0kaz987Qe8JWX17vOlQ8+dvYkqIzoKdmL+vb/CJM7uoSq9eIlcmSfNuGKC4GdakO0JOiqpXShdG8tyROXY9VQk9KqkVvrJibnSaNRA5jG2M1OMDhQpQjIdHCcLEVx1tXFFqAsKatDIzomwJJL7eyl8gYop/OCqif1XwkxjTGqaq3MixBdPZ7lPZYEuxASaWCYxwrSoMiYbHjjuuV1HnPE4IBEqyPbKrQEwHYumI7lcgAtjJfv1dkecLorwEFKO2Y2BHBFglwKI4TfW/+M5PYUuLgfSaMz6a5S0jhxicr3+Gek3a2ZSDEN3Rh1WwzMDthTKDSnAw+iSlpCwaK2vE3T3IT7PJx9babcGURUD2VMfF5YiUE1gcO0KEwlQjlfilAhQIOmjmLK0AKKUDLS0rDgnWlGCAVlfvBQJgAWpECt1mWtDkDiXQ4Hk0lEpor/7TRBM5E28NNaQgCk82neDauQ+SXHLQLRxpggB4x3wyYRZkjyFK7CsJJcdMKsCgZbLUPvh/v8BJEgpzdBhpxbEomhTYDVPrFJmU/ydwWwOZLFaAtrA/ykpon4c8ZDdCnUwK8Wh8AgDF0KnB3EgNRNjah1mhLfM/j78HKKiDLZD89gAxq456uoS2caMrzOZ7bG/mzfwZ6aAfoOvr0usan7n3kiCOOOOKII474ROL/F9/VXxy+9BG35WXxJ4noPwLwHMAXAPwhVf3fyvd/L4CfUdUXN/v9NIATjOr60x90gtYE929cp5pwZoEqgUnBZJTxbW/YxV5Yt62BCGbH2Bn7xf4OC0N1+8NYRdSdgZ0skQjAYidfyUS+0KYDCjDcKHpJ0oMN8aQTY6VXXpIwJFtjGy/V4ZASFp7iCVoFKczxhtBP4yWchNAe/fur/c7OrmhXHe4dsAQkj+v6JjK5ppizS1pCArNWBHsC7slnMAtux2ACjiIRhidUmFdaVTGBHgEshY5FlnS0AqTEirsn3bqUNMdBhVxxLQmMfU35eyT5FUiqDhm5R5yvRGjIZHJWgQdPaMhXlYXYQQrNY6e+BPkYL6Ndplkwxsn0S2x+AhjzNEAHAahzsl1ixZvUi0uknGtcjjGGzvxIMKSAWOZq4eMfOiMRkZyq/z5NA8pjKCv62dkawRAAvCzB5zg/ZV3l9XDQDKtCPQNuzTRnZprSmIvJUOEyHxZjFwQrhfoAGsQ1YoKVktdUMErZ1O/BYOQ0dXaFtSNZKtGe3cBWcLm2kez6lhVcuC2hs3OMXDh0JUTJygGjz6tWXM8ACWfu8BWpm5L3QZnvpEgALxsvQEutpLiXR1vC7hcYTLN2BZYX9l+P+VD70Kx8L+x6I/odGUAV1/4WfAAm8M7sye3fAJGnaAgxrKyJ4LQdjAEMDZe4F/3j5bSDWbEvC/rV/03YCuCgSPDUmGD+cbEyhxjzicjddnzeE839DZDX3GgwQPDddEhSH4kowdwci5u5ccTH/z5yxBFHHHHEEUd8MvFpLfq9APiPAfwozJHmuwD8GwD+eyL6+1X1f/ftvgnAL7xk/6+U758EEf1uAL8bANZf8bkn34swmAVdGM8fTib6JzTRpencsZw6VAn0YoGSQnkUtttq8Ejq0AA6DYRChaBX0xWhslKaq7L+Imw0fErhTsqla4yXbX+hT5FMGL2811pycg2P7tv651YGYeef6tABr+X3sqCTs1REsXh9f3u0drWrt1NmbREVf8EnT0h0tAUIUCNYLtYqJQxGjo+HAUJkwo03QM24aHCgaf54YnnEdjdslAFEqI1lWFdGpMWoMVKkglEOmmR5RxwvgBnViW4uJ0tsbhkzo8GY2pv6AA4qRElEFRUlKsdUAl1hgrIBOMUqbzAuFh/UmGvs5QGe6JKXTeQq9U4JlETwPoaIhFKTAwSz3ozzRidifGOnAtZRd9AmABIAWMRWrytQUkuprpRggPUBmQiCAT1LWirbDvASEYy/67AHAyfnpgILIA5syomMZSUDh7E+xQEwQJEAxlwMOJrNV09YA/yYAArMoGX87OMcYW9sY44BNCUgZmUpqqiL+qV8jXIOJctGSxuUhvgrwcQ5yUDOYKBEn3Qx7Q0w0DuBd2sEO8OEvDQvAImc7w7sxH0ozc6fekrFQjw0iPr9KN2Q1drHV4I0wvo8zuW7afSPQGdN0CS/7wxZFXKHtDcH4KVNMfe8qeLPJx9o2dq41sEkI98u9D+AAYbQ2G5S7dU
yD+P6SLm/ooTPAXKJY1WdoQCXJB459hzQiowpxr8XbOBVPupoACEZNLSMTKwbR1h8bO8j9V2kvfPOR9zsI4444ogjjjji/eJTCYKo6t8G8HvLRz9BRP8tbBXlD8GU14GbfKTEB76+qeqfA/DnAOD8Hb9an3/1znaqezlduX1lRYuSiuJKIkLokbgpwFeeWmIJgL+ER2JWXoqpKXQRE/VrsVJckzpFvP2mHkhNAncq79QlUYzEtDhP2Mq3JzDlGLxjrGAjkocyiJFQLEg2Aelgcyjbir2sYzWYN4C3spIqamU5vgovuxpF8GoAACAASURBVJce+PmgNBx4FvWkvyQDJSGMMdV6ocqKLm1sf0d/4veajCd4UPopZQAoWDzO6uk8wANHZZSRrB87QBl7ha24Jl3d9y3uGT3AIWAwE2gcS0v7UruCCoDEld2itorr8g7KSD2N3MZFJu0chDkDw0imnTYPVu+KgyIroO4UAhgAAR3nqfN2ApWSUuB/x3WNz+LzKDXhwsYRB3pqYhjnE2NX3LriyOrMo3Aiin4BA6CKc7+M5u/AYrQFVaOHFd0FJnOeRClVjEHpfwB39bkhJwwQLsCMeilyPMZHFAwywMSWfZ6RuJZNGXo0DLaLgw+K+lzTHL8si6qgm+gQ6iQDNJUtwZ7YIgFSxjivgu7XWnYCuomXigMPCXr4+HC4pbgYdAjF5vj5faReBrjfKzT0K1ynQzpBzg2k/LQcxwEmgIajjo932+FlfmVIFODOgzWkas9/aZNjUtWpCdBOE1jEHAn20ShR8Y+2jQcguEV5E43ypAKA2N8x7mX8mwEe9VlRz5GlZaH15OBuuPz0E/l9zEO3BfNxSPHB5Wi/TOLjfB+Z3kW+7e9+2b5HHHHEEUccccTHEJ+ZVxxV/VsA/jsAv7F8/BW8nO3xTvn+iCOOOOKII4444iOJ433kiCOOOOKIIz7b8alkgnxA3K60/DSAf4qInt3U4f46AFcA/+fXPeJOaD8fHpL2I+r2eSOcf8FWQeUE7Pf+/dlXAZ0+r/cdoJYaCnxx6jycDs5kq547DbHKsNxdBFhgQnmdR+lF85IEIdCFTZehlMBM7h4vY3LUUeKxephfiIkRJgX/JWtQ6iv0dCHoRunKkN8z0O9stVaidKc41PDmooldgZ3AXd0NYbSLO1ynw8ZTG6yM5vZ6qPfTtVoixK8VgCwHGav3foyJio6pjCJZAV4OoWDoroNhkiv8lKur1AQcq8YCKxuobJI6/lkK421o5rTRKcbI21JLRm6uX2hF8EbJCpm1VWw/tdoFmyvF+iTFOMnnQW9W/lFKq6gTCkFl0IDgc7W2p5NdhygxEL8X2MeDxuc5JqRDkPXG5SdZEx3WJhoMmsH+GGOU4pt1tVxtnnFnyKJpPz2sV2mULIWmxi0bxFkg5Po12pH2snEPUS6RYxLArA46eW/GXC8OPmHKq2QMCKGbcQCsBCbmfOhClA3CTYiyjGx0MXVFXJdDmybzKh4UvOsQcK33j5+bYj5K7DJ6qkrJXBL15xnNejJoir4CtFKWy+Tt56Vm5Iyd2I+2UkJFzoAhZ4OsmqVaYDXXFgJkFVyvJ6zvzuVlIabaNqQwamqSRInJyXSQgvnCG1JzqJ91aCD5Mz5spaMNsiI1eEAErd7eMVdjPtzaggdLw12jtJRxATCWRzxr/T6srAxj4Ogot1SyfzfqdJ4EohW0CNo6GiFC0J2xn5qxCq8E3gezSZuzc96Xw3AEPo73kSOOOOKII4444hOJzwwIQkTfBuAfAvBflY//Esyy958B8EO+3QLgnwXwox9GiZ0EaBea8+Qh7ZEJRD8r+hteInHfQcug15/evGI/NcglasYXkLojzO5aFv6yncJ/O0HOVsBPp25J9bpDgh4d9H0FhBtADdrNbhYA5AbsiHIS3IIgU5JUM9kCFoSVrNT9DPRIe1+FaZeUBFwXKw2x2n2j0vMGyNmTrSvQLja+IZRI1fMTAVwgEz5Vp9/7tUmx0XjdJCTYFMccjjcE3By/WqDGNbilnlvnCLoDHIl/zIFKeffSBt3J9BBi7BSg6vJQX43JE96dgEZG5yeF+rwShgNcOlm21usXjjK8DSwlhCgD+IpSIoiDVZ7I5xh4wjfV+tekXQB9aMUZZAyhnHnsT6YBY332/T1pDiHQWyDOhqPo1Sxzjjb0C0rC7xk6lXGY2k2wMqoQtg0dhYvPWdeHqQKY2mCOSnWe081P/z2S5QAZTHzyRrQ2yl4w42wJrhBAqkXQ17eNcga/tnna3Q6UQ++lLTHOocETG0hq0ZRxEZsb5kSjAFPqaSS4FOOlc7snMC7GJK5JjDPbF8p2rytsPsnJk2d3JhrW0gosNB5Dixg45MAdPLnXopcEtudj7hQinQCwM0TNyYsXwf75DurNnjEYczlceXgb/ch+C7A8p9S/4I3QrqPr7UzDAWcf122U2REodE4CtLu5rxJoldmhK0IJBrC5lk2KDANIh6uY8wEyxvEXBTUDNdrSQQTsW7PLV8RVp34z0JYOdpBDhMB3iuu6QjaGXNjcaqroNNWBP6LGx/U+csQRRxxxxBFHfDLxDQFBiOi3+a9/n//8x4joywC+rKpfIKI/BXs9/B9gQmS/FsAfhKUvfyKOo6o/RUQ/AuA/IKIVwM8A+F4A3wHgez5MW7QB2+cEaacKGGNiJ+hJsH2z5ur8+syWvk/nHcyC63XBvi1Ylo5l6dhPljlfAcgzAj808BVFj6GsSAoBwtCTms3iXQcV9w8QUp+Bz91sGKuwY+qR+MtyCHrG/u42kCvlsW+yJihXJNORRUfSme4RsTLuyUNd8Y7xStFOZwLI6i/aC1K/g6++6n2b+MkQRwRssTWTQAFAvmrsK563i50UyW0kbKChS9hHAkNxbgcHUkOgY3JK4c3GQGpSwtaOwVAoyXWwZW4TnJpk+9+6KOTk2gORq6w2qENcMQ5SOuosoBCYpJ2Kha8nnHnAIk55k6CHO8mU9JbrUIGPSSvm+UjANFbVGVMbSFycEYCcY26O47EitRECaEoG0614bcy9m2OENAWRXZ9bEV/ekFohvFkbpTBFbvGxqtFT7ztZAIbaZIz9ARP+reyR2ra49xQvd3BCGfcAZzQSXv/eNUc0AEN37GFP8HlHuqvogsHuqXoYPmbsgMokOurgBDvAMAFRZQyq9lEk9JM+hd48B9S0fgBnl4VddFyjW5pZHXeh6floFyoGHAYy7gAF6itj/HQl4CTY3+ABhPrzSncCX8fztgIZ1IHlOUyDhnzebGMbcfA65zoAXcczQQnDftyPnSBo9C/uZ6WXDDRA5Ey/psAq4FUmdzJidfFTgipBemGbRT9cv6g1SUczvTmP+HNfhdH3BmEpbShj3tQAxQIaSuvzffmaxqfpfeSII4444ogjjvhk4hvFBPnzN3//Wf/5BQC/GUYr/V4A/yKAtwD8HIAfB/ADqvo3b/b9XQD+OIA/BuBtAP8rgH9UVf/6h2lIO+/4/Ld9FQCw9UEBuV4W3N9f8Svfeg8MxXvbCadmb8TntmNhwdcud/jK82dQjR
dRe4Fsb9uL5uVhxf64WBKrNCwPYckab75iuzdbnGYMQc6dnQ3iydXiWRxHu60tsltmTou9OMc7a28Mvbbh0BIruvEOHAyIBhdt9e3iXb5rJv8UgAvNyfGU6Naf/r0sAO7sg8YjQYlIcKUCP3WVHjCgRQaIcUtAZg06OTLBoUx4aLjSAMl2qe4s1C3Byb+D3bDNCUf0bSqvAHKVN8cGmJR2qpuLOmtEFF6aBAPBVsXwRr4ZT1aAGH1VyGpzhjedxU8DAIqx0zG2eUjfhnvZLoA5L2FJG1wUYAkuoOsimukeUZ8chCz7IF/lp9qHGHuMHDeTXD9AOuAUcOnJ6rnvJ+xASzmGlrkRQMAE5MRYahmzGD8/LnUYmLSoiY5W0KOAgrXf2bYQIvaSpOzDS4CtYcU8H0ehIHaGB2yeilCWXrFbFfNOE9A4gRGLJcfcDSiLkrRspyf+Uxfi/FSuu4NBBAB9zDcUoCVzfMEAalzIVxcT3xTRmeXQtZ7ZwWCMMYr7m5AMKpKnbkraHOw8C/SuY1/9eO5kxDvAK9Be0ATwqZ9qeT7OZ8+EYosd51jGfdQFAxDrPqY+FnYNxr5ZEResv3bz3CRAiZz5pi6eSuNCMrAs5jy272yYURuPbnHAZdeGfWvgxUAQDmt22L9HcTBVYNvJWIblxt0Uk/OVzdVxrfj0ywMEwafofeSII4444ogjjvhk4hsCgujtctXT738QwA9+yGM9APj9/t8rx5vrFb/p7/pZPPQVmziTQxp+4fIM75xf4Dvf+DI6GF+6vJX7LCS4b1c83J/wt9a38aX33oQqJZHkc88e0VhwvVvw4rLielltRa5TvsDqYwNdSmLhZTDpJKDD2QBNDYhgpCsJN6NBg4xFYC/Ami/SbR1yFlADVfTCT/UFIkkjhZIMAkO3+vAAWQAYZZ0wSkky4RiJovL4Hv7O3e/0yaorEKvVccKSiGN8JsCkRVJLlZQAxIq3J198xZxcnkdCmUCHFjBGAbq4NknRR5m0FkqCXVfdcxh1JI/qjKJcmSb/H2uCYLSX4y8orJ7S7ohm1yU0P6STlc8US9Fg6STIRQriodkBlNwnmD03/UvGTCR2ZZxzNdyTOW3O9uH5cxs7ZwAIjXyq9qskzxU4y8Q7S5t87hdL3dglS6QUaQlKcGAgwD54m0LjIIAaHnoP01iTzVGEpTHDSlZy4lnC3uu+Zf9sU+2geB+qTeptlPZG4p/3IwNgQffysu7PCd4GC0ZXzbmQpV1wvYsOgIwRAYx5knoyDgAEsEUVjChDo2TAin0YcyzmggI37BjeYaV73V1cWknwA8zM/nq7K3vG5yMRzZowwGCWKECblxcGCw4GPOhioEL30izaa2cA3QjtEQmOaAOw0nwPCIyBov68KgAYKdAeYjsDJyfr75wTcDDJwNhq/6sN6B0gYchG0JVzP1klGR19GxckmSAbT6zATkBfBLQo2HU/ZOlWWigGomgn2684LJHrEenJ74nTYKMAwHreMT8oXs/4NL2PHHHEEUccccQRn0x8ZjRBPq5YqONbT1/DRcZQvNfPeGO54r5t+PzygE0b9pVx8exh4Y53lhfA+hyfWx7A9KvwtcsdXlzt+zdOV6zccXf/HM/vT3jvesLeG7pQsk0ulwX74wq98pTojTdhIMU4FU63HyvY0ptrjdiLrIgBAbEiSaRoi4B8hbDvDTstdr6IOG7akmKAI81WxDXaRFaWQwxIJJmb15FH6Y3pwyYoksnuCnR2AdUNWb8Pp6vLMpLw0CEB7DMGkoI/sQhQ8Bh2nKKbGGJ2j5B6DCGk+USvwpOcaEck19PKs46EJvaZxrDES5jvo9QIxYI0dtzYSmLmjecDLCZsCNh11c55DcCmwwEhoNuqeRcDqGJ1nmCgxSiLGYls9iHLpDCYIQk8kSdzCl38eq06lX3oosl2sjIMHZa6WcKAZGwoxvlzlbyM28sqKIIxQApoAEqetWXi6fo0CdDEMX2Oa8z3+LIm5C5USUoGftyUcuTcCdCq9CE/j3Ht5ctkCPl13mLCWjKKCkzGY4ji74qO2bj3fjNQ7ABqAEexrwL9TGiPtn0wQvppPFZMoNQPtWPSjwkNoBqzyG4AKKN9dj/Z/IrSJANwZ6AktE0ktDVuRGqTcRPjmeNdzrUTcOGhPRLBPgcWxc6UYG0cVxfFJgO4iPs+NUEuDuT4A0a89KgFmLQDy6Oa9ozYfcEvebbYxg54nIYgdAAvfCLIo4F3uo79ZWHsYfcd9tMVJPL7vLLVdDE2VV/sIH0tN0BSdsa/D8GKi9IluutY7jaIjH8flmWwG4844ogjjjjiiCNep/hlD4JcZMX//fgOmARnzxIuspgmAIAvXd8yEERbMkU2ZTTc4cw7fuXpXVzeXPGV0zN8+fFNAMBb6yPeXC94e33AQz/h3f0MUYIoYfdjPPYF713PePfxjOu2oO+M3tkSWuD/Y+/teiRJsiuxc6+Ze3hEZGZlVVdVf8z0zA45nBGHwqwIQYD2RXoQBEE/QW/6F/pbehH0KIALrAjxgQR3R1wuudOcHnZPd1V1VeVXRLi7mV093GsfHtXUUBC4u9PlFyhkZoSHmbm5eZTfY+eeo04xDQU7R04sU2C0GTex7mCTPbSy04SZWeB9gjdAJHi95ESWpFL9XWIDkLC1xZpwtmUHZVfYCQRJdT9EkzkBgJM1MdUEtTxscxVkTR2VZKMkCDPgGkygsARaJ4xWU4SgQA1XoKMAHKiJBdlObnuKAErJQNlpLkwE1Easy6zDsBDHtGMygyUnd217ZUxk2VyHstMrAiThs/aa5CaJ1euzXhMTRST/zqHFqQZQoKoK9aLulguq1kWLNOR2oiWMqeqmcGfXqNPSHcmuRvkziWwt6ETR0UHQCO+CKmtgoSCaB17ni6y9UsLQDrFoYCjLhWZaAiksmrDmvtB83hJaIlrmqLGu0QKEiCaZAllOcAYXsqBnG0LL+9USVXXjqYBVy37R0iFZlIOVyNenbbMAlsa+IO2nsGEcFXAlgz2yFaR9HRMAoJN6D9m1A6BsNCttyUK8i/If6Pwk31xDPgO07BiKBoIEgFtHqDLvdpx99J1qMPkHwMYGzKAEYCIkhyLMWsCu/N1l65WpMu0oAdOjeo/rMc3UH0nH3jDHeGrKigzUoAZ4WYCkdr9nBpuWn9VjUweIMbWSVwAj9vW7SjyQJofUibGbqALVpRGUa71gE6X8/wdV8CiDgk4gm4ruxkiAF3Af4fuIvg+YZ50IIh3ybyFJrLHGGmusscYaa/xOBv/2Q9ZYY4011lhjjTXWWGONNdZYY401fvfjvWeCnILH//3mQ+y6GZedUhiYBJ4TxuTxKu0Lg4NtWzSIg6eInZ/x8XCDK3/E1mn5TI7L7oQLN+LCjbjuDmASOCTEBnc6xg6vxgu8HncYo0dMjPtRvVOn4DFNDmHydccvUdUMCYTik+vMSSBv3wFISCqmFxmJlRGy2QQ4271kTkiJrW6ckBLr5mJDlcisFJguSGp23vUA+2k75Lk8IlOqC1277H7rzni2hxWv2gWtMKIKcDZlFNZ+a
kQKy46sNDvCpnMhDQsj6xOg2VUmv/y7CIU27ZXP1aGAk22+57Ka853tVNuJve0Co1Lp8zVMMyH1KMKWwrrr3DrstNR3Iar0flbqfauBoGwdATv9BwjYJb2utuub2UPMxtYQW0f53ExvRF0kCBIYmKsbhXRUS0y6ZOUNzTa9oDI8slUoVaHJsitPMMvYpctKsbTN1RzN3LVdoGXpRCpzrH2gMG2kpf3nyJbBZJa1eZm3ZReteGU6ayPZbjyTsnNsUO/0Y/O6sCcu58C13cyu6pSJUCfjbE4TLdpaOOlwI1haPmPNCClzaRNBmzpeAFU4GJnZZUMPBJkZ0al2kIyq95NdhfL4BLK4V1qZj1zaJKaPUqxr8xAyW8IYSRzsfBo9jtzmwtmlYYpwaFgOAjhCsTJPzqyaHb1TslXO2dV7gxKZW1PL6uBFCQ1IwBPVc8nXLisAN/d+PrfiUARb3wLVuYGySLIeC89Qpl1Cw4Yj8AiknhB2Uu6l6nBT56xosuTXimYRldImySVLDuBBz9s5rXlq/stACK64KQHAPPX1/4A11lhjjTXWWGON71C89yBITIyXby/Q9xGHnWp6XHQTtl6fYN+OW4TEOEwdeh/LZ5IAl5sJr8cdPt7eYu/HAqIcY4ckhNugJTM7N6GjiF1WKATAlJCE8bg74Jv+QktsSPB62gEAbqcBN+OAm8MWIZi9YaMfIF6AXLOfH4AbYVRJVoIAYJo8nEvFyhcAPCckUSFUffgVmOGBfj5UEb1cIkGJ300q8oN4Ts4EC0CjBQco1KQeQCmDacsBpANyxkOxsau1pIhnKmMsLhWNmGfcYEljb0tdqDRdfgobPZ0yAIOiiZHfz9PKoYIgaBNwS2iocb5o3WfIgB4SgVAGQajMURZUzSVDqWvAppxs5pISNn2PDMI4MS0AAbwCFGRlUZw1KBiQxPq6nTo1II7zasEsiW0sEYkYyCK+rZtJAQmakgghS5YqkFETYRujAR+5dEJIKoUfTfKbE/k8/23JgVCdzxYsQE00S5nIGf5GgGpcgErpgrTHtWKf3koQWmyiKUvhdNZ47mBxvLnktIBDYwdbBEmlATVaQZkMsGTwprxNxX4WqO2V8Zgtbi6XEHZFc6ZNeCmXuzWvFTCiE0ifEDtGClVAsx2igpFUgIhyJTPgB02+pQEsyzRlu2sDhIqbEJbXnwzALHbA1k7R62inP89Rr8BqtutW8ECW1yqvL2eaR2fvyyZBBiy0WlJgxAbQSd5+YRQArw7G5sS+A3gytxoDhbOjD4+AP9k5zagAt61NtUQmpG6pmyTm6FX6auZ+4WbUlAaqkGoDCjkVQY2RkWZW8LsB0QAAgZcuWWusscYaa6yxxhrfkXjvQRAACJNHauxxsVOHGBHCzXHAHB2OD315OwUGOcFxN0GE8OZii6t+xKPNUdtLjCAOY/C47E94Ptyj54Cn3X1pY8MzkjA2FHDdHQAAA8+49vr7S3+Jwc1wJDgFj9PsFzt1IoQYdXu97OZStcgVIbgM2gSneiOiYncASkJM9juzskPgtYXYamKcJ1ltwkDVFvVcJ0E6UYeFRCUpoDbx5CZRbhKxvKuLJEVnIydBCaLaB/nwuYIg0gFxUxMaikD3QCXBliZxyCwDTWZQtDyqjW49j+wmUwRDefk+YImJbYlnDZP8ujhtl2cCRwEfARmt/46qZgrr7i8gi3lp3XfK2AtIQPY5gXiGdIJ5Q+BGs4PY9F+azJG4Jl1ErKwg0xQhVjciaYUa8mejsUgiVTFK04NBm8Q1dp3S2cRloIEE1GZvza+Sd7gNXMuinTmRzWKb+lpdF9kdx4Wz9hoQonxOjC1CaERBqUyxZD2S5ZQ1LKGa9FbhVVsfZb5srRv7CYC6+rSsIxIFGhcsKKAIplpSW1hMBgqKGAmsARybM0aeeIoEOjgI14PI7l9xpjexvJntXETFeLtQmUEGCJCxMPIccbBxlvZRRTeNubKwuG3XElA+217HDE6Ve+4MwChCu99ybSkqGUkcVJcm4h0bYYiCuuU7K9ECkJM+gbqkTCa7HuIjxDfjzoytLEzcBPV5MMqwihMvAQXR68onXYQcAHfCAlgtWMUEhEHnPLv6pK5+X0gByc7AigaYkrmySOSov8yD0zmeVLuGzWWrtXdeuGitscYaa6yxxhprfIfivQdBRAAJjBiAQ9gAAObZoesimBNOxx4xMNJ9B7YHyG4kxI3geKmZ7tfHDt90EdutMj0EusM2jR02w4xXFxcY/Iwnm0t44/Bf+AnH2GHrZniKYBKkJjlgStj5GU+39zjFDoe5x5y4HNNxwpwYMTHm4MrrbE/7UQgbA0Hujxu1i0yMZFT+GBnMAqZcCmGfN/ZA8qT0fRLAEYTrMSUnEdSH5pyUpZr0yUZApG+UZK6ljjeJJKEmO2X3n1H6zGCIMIoQJEhFO8kSgbgRpY9bZMZASR4tsRIvBQQpFpnNw3/5bP5pu8/cJjERi+MKiySTCNr2LUF2oybxbkJhjVAA4GrSqG3RohSk9GljcdkxwuYodcqqEKdOE0mA5Hh5bYAFSAGWwvRIvgEpbP5bPVA4S9YTSoZKgdSNBKiMBbMk1XId1F1nL0BmE5Ad34AwWubT9GdOQkL2WWQQRJStYmyPzEgAFNRhUQHLdi7RXIdSgtSADeWWy1MVNXlOXsymtbkEeZ1Sk2AW21NZvFGsXKkeQ0FMKJes7CejVHmem9+lgodtKUphu0RUwcs2qN5HZMBbWdP5PAUA13M8D+lNKLm3konOxI8B/U6AXhfxBJlpaT+d73Op90thT+UhNgm9TjoWYsd57IsxNWBTdKguQAbqZEep+oHaJp+XDEHvw5RLnEgBqgqSUjnfwgg6xxnydVgMOn9cS9OIk37nskBigmTHlqDfpcKEGQz/QIs5qpbXAjplJokU9ljqCHGjzDfxtLQAbtdzM24x4DbPRXJcgCbxFXjK9yHn75gzYHuNNdZYY4011ljjuxDvPQiCRKCjs4dazVamg8fkBLSJmnAFrRHPNeH9jT7AhiNhvhDITJjZY77vm3YBmhgPrsdhv4HfBHw5XKFz+qS77QKOs8e+n7HrJnhOGNyM3kCSQ+jBlOA54YJHDG5GEFdcawY/K+MkOTyE/h0QBAC2fgZD8MrvcZg6jLMvlodMAsdJd/4NCIGxAADA+YSUd7DbxIpq8ppZA5KoWLdKmxyw0q9jRyVJpzPGAKBJ98KFoM0trGyi7LBnXQzo75I1Pgzc0MTN2gUwM4NSZYfwTEuGgRPTqzCLVytPKI4bMyv9PSx1Qs5BkOSX+UfKOgxku+GRlAkyAf5AxTmHraSJTNOEAyBzbb+YqeTmBIsEUfUSNGFJEDAIYNbzKgm5/d4mNIRG80ATolLK4LAoQ0GXqlWnJecUqLI5JCe6OreJbKDlOi3novSfz0FgJShokrZ6XGlDCNLLwuGnLPdWCyFrtlBdK+KB2EvR1aDzBNbmVbLVcxa6yOVlxhRKBL1HMojS6DQsds0J5RoUJohT8IfMGrokzQUUpHfPv5mDck2tLEhZYM2W/9l9I0zmwEJ1
jvI6EANhZPERHcIIZQ8Eu1fatZAP9AZKOdWOKWtJBDQTmAhuzOdRAYZSQWUgUSmJM6CoABblO6bOPQAku3dr6ZpeC87slwwatfdNU6JTYlbWUAZjOFTwFrOWAWU216LkKM9zbrPRkcnfGSkoGEmZJZL7LqVOBih5QdgprUW1gfJY7J99r7pJ4I/1SyA5QtgqAJL1UnLZ3rcx68o6C7B26nvCwLxX/Rj978/mMRk75VtAsjXWWGONNdZYY43f9VhBkERwD2f10PZwmLYMupw1EdjGIpHhjg7DNwqGHD4B5ivbmZ3qwy2ZjgafGHJizIPH3G3UWhSwB2TG2y7CdRFdF9H7CO9quUrnInbdjL2f0LugJRX2oO0pAazaHjmYUn2fE/ZuApOAKeGh22CM9XJnm9tT8AUEOaErrzuXEIwp4LLORE4Ii71r/qm/SzIWScPeSF2CRLXfjTsYo6BNnslYJGK2olg8tJfd+nbHvd2k9VDmQSNWuSiH2KjuSWGdZL2NFogRaKJoiaXOj73d0PhLwuFMXBM10RInRdsDBKRNWrQPB8ROd3OTB/zJQJBRKe/lukRLaNp5KwAAIABJREFUPlD7RWoSnHMQxAHsgGjzlgC4k+5kL3b5WSpzIp9zrNchd0WhJp2lAmZPZee71WFpk7tcukBi2iVdRV3IQCXxqNoxJHW3vSTmZ8BEM7ZMYxAv+q1VysKa95kL66aUs1gCLw5Iu1j6L2Bdq6FgQGgWy9V2KrSVvCyAtxaooKbspZZQCLJuBGB/s0AcV/2GXFpUGkItCTIrXGrOUaCJv4KJorUf54mq6KQLG3sge0634Ef+u7nOed6y9o7kEqEua5BksMomkAHySVkj+VratZee9DyBsvZyHxAYk6NJ2g2MkChomVbl/TyHVDVxKEHtfIHl3KMeS+cASF7rBSipoAuVMiIFRVJmiJ0BKKUMK3//OSl6LeU6dXYeBhoKS/0OiyhgnPSCgITYE9yp9u8mguv0WvGk43ZWQudmAYiQMijWfDflaOVl8t/a7nKNxr4pVXK1DSHAH7/9llxjjTXWWGONNdb4XY8VBCFNbuj8Qdl2ZYf9hE2nFIBgrif3/gLDa4/hm4SwY6SOEXep7pgOCUIC7hPiwYPGSoNG1h4J9vBODjML5j5hHGIBGogTnBPcdQH7zYRdN5+Vy2j5TBZw9Zz+wU273kUM7qF8LkcSwu08YOx1GdxPPWKqrYTICInhOcFx+zn7aeU5Yr8XMKQACY3riBCcS0iJEIPOgQipKF9OBifbas+JSrS6fbe8PkUEkaxswkMf4CNAM9ft5iYJBizhsYQ7MzE06auU+upIs9w9z4mPahZQsztv7c1U6vQVuKjzmDUkMttBxU/tTROIZKCACyrmauug0SgppRcNYJcZDzxr9sy+GXMjc1NYASZoWhgmgDE7at8lEbdznCdWnZaMP2Wgqb0mWYQx2a48cYMvJNMPqUmbXZBynfRAMW2RJThTfs+f9UlLedr3CUgcS8mB0FlfAKi5v7Q707sAtF8SpEjARJpcLxg3ti4zUyiXUZwBGIuf+ffcp4OCn5zUtSOS9bv8XHHisfUgOUkVUi0X2PrIjJC0nMeidWG6FVkYNWu4iM0xWd+lvCw7pcymM2SgpCQqYyhYo7CJ8mIZrJm/OFKx3gRIxwVEWZSR5HuzncPQ3PcGeJzPZ557AZVSGmlZG23kOSwgDC0AAopSyo7azyAA5KvDUbknmjbL71KPAQyAyOBL1mAxQdvSRSNmLYOybdLA9nlCPBkokhQQmR4RugebtmPtB1BmCEn9vtA5rQSsPF4FUu27MwmSp6IvlMWfCyDCgBvP5n6NNdZYY4011ljjOxIr2XWNNdZYY4011lhjjTXWWGONNdZ4L+K9Z4KQF+DZqIyG0GBCgYAuYT9MeLa/x5wcjrNu35+ueozXHrsXwP43CRwYh48Y8cJKWfqIzTDjB0/e4G7a4OawRUqEELi40MSDL/1QJMjMSm8u23eMAGB86HHsN+g3szInGncYZsHFdkTvAzpOcE1pTGaJZMbIdX/E1s3Ye+VUj8kjJFfe37iAcfCYTBclJIcgjDF6MATJtgRD4sL0yEKtIoQ5MkJ05W8Ai/IZIkHvAxwLgok6jLNH7Bhh9khR5z+7zQCN4GcWTswb6nO5eiqUaCyBSn2v25eqQ2IvZYFIY33kYNt9dmPtY7GrjrrT/K2uMM3rVaugYe3Ye7FTtkjqGk0KY5lkB4us1VjKcrKTCVAYIIsd2whwEPijsjhSR2XnO4+tiIxyZr9kqn8dO8/GQGm1TnKFw0wIu28/7zxH+XghZRvp+ZnwqkA1JPLajg0NpUxSpnq050mLkpNFh2yT1bJJHCAumk6DirsW5w6qzKTCtiIppSKS2Eo5BCmRMmIai1G1hLWddAJAVei39H+2ZijanJc1JMoAyKULZwyCal/alDlENJoeosNhGJOjYVcAdT4KY8HWQp6DRqy1HJOa9gu7SJB6WxeTWdga+4pzP2zuMpn5lD+bKsMBvWrJJE6qK1OuXx5r7p8XbJC2/E28LMo0zn/qnDZeQ4Xx0SyxlvaUD8t/GtvlHZ2dBDCMAZMZZ275WXFSdFUWpWct26XtM1cl5TnLh3iB20T9/rcOondIHYMSIUZBGgSjaVJlTSGe9bsluaqlknWL9H62dR7xTokbJULc2PdRdkhKgGu/x9ZYY4011lhjjTW+o/HegyDMCdfXD5ijwzTpdIhoyUYcHV69uMLbYYdw09dEYCKEnSBsCFefn7D/DePmvsfxqT45jocBx0cer7cjLjcjHj95jYtuREiMyZ443562eJg6PBw3mE5dfYjPD9jREoNACCeH0FuW3NL3u4QYCd4neHOCyQBECA7OErzeB8x7h62fMdvT+iH0OMUOg5uLfsiT7gHREtdgYEgCIQphSh5RCIdGhHVKHkyCKTrMyRUr39mAHtc41bQ6IjkJ9S6VMUZyCJY0SVujPqM6iTT0chuclnEYUHJev04AJJjjR86Dcq7dCGlmZwZKWJS/lEbQfO4scmLDljCTuZNkkcOSqBLgmZA6IA5UwRUTeSzCkW6ZUOVzUwvLmty1CQ7PgD8mUKQKpjTtZPcHMf2I5AHqmuSnmZcK4tQxuFMGUVDWZ/LNmNt54QpeZI0ESow0SBVoTfpaSfrb5DxRBYjOyl3Ka2bTugC8jM9PHqCsgUG5cRtz0EUipbSm6UAIaIAxAUCN6GXR0kh1vhZ6E3y2RDLI0ZRRaOmSLOa5fZ/y36U2Sl8s77e6G87ca3IpGaAlMjm5lrw2CDTbvbxJdTqSAhuttgUAJDGwyjQrSqXNWL/78rzTTHUszRjJylOSpDopoY5dF7CBWGJz2paXpLq2cm1PEZcteib0zpzqCVAtzSqApqjbCRYftfE216C9Tvm8o63nhFI2VK5ztv5uurLuiu5H6Y9k0SaoWRusYqq1AWipUa821ZQIuJiBK21gmh1wYvCJFw487U+eCc7spPN3W2rdh2ydtVotHFDBYQLmHS1PbI011lhjjTXWWOM7Eu89CBJnh9u7HeL
kihAgggoXbl45bL8i+GOH61+eSrIxftDhm595pE7g3xxBn/8Gz3/1BOH5FQDg+NGA4xOPu0+e4eUnAU++9xY/up7xuD/iujtoI5fA1+MlbqctXp92uB973D8MiAZylB1VmB2pPVFT454gYIRTh8BSXFqKYOSUM1TgtAsYZw/nEvabvZ7D7DHOHrvNjI0PCHvG4/6Iral0MiVsOGBnfx+iOt98jUsky1AGCdi4gDk5TMnhFDvcuQ1OoYJJRILT1GGKjHHukBItxFcdCfo+2DkkxOAWu/eSdUIAfSCPhNhrUscz1Ili0mOz+0eOshHcJiMCPSbvNCdNnJGAuLUki2QJgpSd3/ragjAieedVtTW4YYvwSMvEM2bGRu5fx5M6IDvhpA7FzrUdQwFspK4Dnuv5qaBnXTfZxadooTrVAci7xWVnmhQoCTYmEiA17iU8mlZIBkFsPAub0zxUS2ClI3AGgKKxnVjKjjTNdXe+gEB5J7xBEzIgsmCENCyahWpoPs6YRuRSFU5NtNQbydG2G/RaUb4mixPTcWV3npJcxpyM1yS9AGpnIEdmkOh8S9mFPxf/TJE0CTYNCWqzdhubsICYIJCCQlXmA4qVLs/1oxz5ncR3wWZhgE7qfJUBE3GiH8hfJyMW6xmAMjEySDGTMWYAClwBn4Yxo4KxKOKw77CrMpMnKeNB8j2rnS3ckgpjphFOJtPg4ViZRDm5l3zvZ3CJDBRsrL2X17qCNM7WWMpCqA1YsjDzCVjgb3o+9RpSBGQG0kzKqAnqMLYoTrWbNvcvgeG2ehKumxC8RxoYKX/PZyCsIG56LSiQCb1mwMgO96LfV7ZW3YmMdWTtJGB8gjXWWGONNdZYY43vZLz3IIg7EHZ/ttNkIT/QzvqQvnuZcPG3N6AvXyK++qZ85uLD5xivfh8AMD3dw//lDfD2BvS39v5uh6tnHyB8/Bi3P9rh5vef4s8/eIJ0GfD4+R0A4JOrW/QccNUfcdGNeN3v8DUJjqOCDSkRUmSE2UFOru4Ko3noJbNgDMa0aOwYKRh44gWRHA7TACLgwQ8AoI4to8NxE0FOcHva4GoYcbVRa5KeI3oX8Lg/wJEUNsiL4+XCgWZqSmp2fsLOT5iMCXIIPaJo+cwUHKKdT47NMIM4oXMRBKDzEXNIi5If6YEwVQcbmblYpMqRLaG2vxnLMo1sf2uCoLlsIydZ5yG9VMTAot1pFyeaqLeft51sRAKPXHbXM8DgD5X1oYKny3EWgCbbqdqudtw0AzNQhkSqW0iqbaaOELaE7kEKKLJIeByqu0wW3Ix1jMJA9M1njD7fgkfuhEVSn3eOyzlQbS9ly9NSWqOlGyrOqoBL2Q0HIPFszFTnoyR3E5frSYmMDSK1xCdfF1Q7ZXEN6GFAzIIhgGa9cJMIl/4rUJMnoohqWslEqcaxtnPCSU3S30ZJvBMZQ0cWYrwCLUlIjcDtQoA2UHFGKUmzZeDJN6VfDXBX2k/NcBpcsbSTk/9oIFuZBymsIZ5NTNOACZ2Ld+8lSoAbqZRlFTCGVMiznRrJYAvO701N1EUap6NM6Voo+2LJ7mjAIAgV1syyUwUg8vikqP428x2hYrZ2vxWWRS6ry4wTwoJFlEGTts9sYQsoSyyv9+QB9nWe9ABljaRez5MSgIMrXXAf9Tr5VJdGHkdhVwlEgDQbiGwAXxlEJ0gHB5rsvrAGMrCpIEiDiqyxxhprrLHGGmt8h+K9B0G6txO+/79+AaRUd+tiApgghxPS2xukeVp8hrYDtt8EnK4dXv18g4/f/BHkz39R3k+HA9KvDqAvfoPHnz3B4794BOk95idbvPmJbq/9zadPMD0L2D094NnlA0Ji9D4C0L4ca9IfIuOw6ZXlQaLaBQDizDXZEbKdxEq5lo1mKdJb5pM0wZLsTpNfO3ogEu5ODoftgBfuQvt3AuaE3WaGM72Rw9jjOHZge9BmloXWx+PhiE/2N3jUqX3B3TzgPmy0DCg4zNEhNrR0MvBEhFRHgtRGtS2dYU44Qc8xie2Q95qNJO8gvT7II+/MNtR4tVNV4CIzLiSXUrRaEgQQC5zpFkiTKEs09xroMdwlMLeCAzb/Qohz1UvJbJZ54uI+wSOZi8tyDfJE9Vrm8bS749R8hiqbAtBypflKEHZaZrOw121ZMTlnlArGFMAs95myvgAWlps1AUVxk+CpjjGX22SWRE4CU9fsSiMn1UCKUhJDwJYtQ+fZkmtlxtQxuBMh9dWNhGbSMoXchZNiS1oG0+6sZ/tloAI57SXMJRcsSEOyXfQ2kVWwojBLBCBPDQMEVcumfuQd69J8fZUBcrYQMqkjqcaDtIl0/nhZetnZoznPxooWpN8HsWvWUsOeWJSONWBXC44VR6ImQY8bdTYisfvL2BzlpL0gMi01OdrIFTnN2sqsGP27ASOIAFtn1LDD8gFSqEotoCIKwhGQDNhInTIfAF3bxa2pLXdqwabFuhdErtc2z0uubinaOa2mSOO4UvRt0IAgweY/6L2fOgNiip1zPdcMOFLgalncuwrINte0BaMorwvSkiTxXMvEctsbAoghSRDAen81l0ouVhDkP7X4b/7FL377QU38yf/5R/9EI1ljjTXWWGON3+1470EQmWeEz371D75P3sN//3uYP30K6TVbePmjAcJAGAi3fzhjfPwIj/+z/xqP/tZsaG8OoLsHpJtbxK9fAF+/AKAs7I/+6kMAwPTjj3H/gwGHZ4/wxSdXiLsEXAbd5QPQ9wFDP2PoZ3Q+QoTgXSwsjMPYq7BoZDiXEAJDEtdSE58QA4OdlZgAC3tc51VPJIxemSaBEQ+EKLokZksmD61Y63yWkQH68N/pMTcXWwDAB8ODvaXABZNg18+IEtTK1wRcHQuOU4cp+JJctha7ABBMxyFb8BILXC4TcILUJ93tLLv8TXbrBdSlStXPu9BtBmhADuVEULAoK9IPCrKWhGoDtEKMWorkOIGt/IINwAKAFFmZLaKlV3Hiykiw+XPEmiSF+lrWuM2Wtjo4ejcpFiD1QNoAMwlSXynuLXghrOUyPGnCRQHFVhOiyRgFgB0Km6BlqxQGRM7PGraLNO9xsF3/pOPKny+slE4ZAsnX9ltWgepJaElDe67uSHBHQuqkJOatuC2YtLygJLO0ZBVkFse5rWsGgiIV9gltA8QzpF3vhKLPUMALsdIVAHxis621txyWjAQs5/DbmAutDkspFWnmvAw5XwORRTvCsizzMhbEMrN9t71FxFpGhUilvTzIlDVAAEvCDagoZUtYWMHmc67XmioAR/n4CgDoOmh+97Rg3LRJP9nf0s4bG4MigweREIdUziONBH9owAugCAmfl+Vk0dcCNuWXZwOpCEXkuC07ihtZ6MYU8dS2bTm77g24kxEWatgbFFF0TWSu7JbCmAIWjBeBq9/b+X3hOojcFgnggbRLiCyLz3Af39FZWmONNdZYY4011vguxHsPgpD34E5LRNKkT7K8HSDjCH78GOEPPsGLn+/w5o8D/KW+L3JCetvD3TOuPrzHH/zsJX75xx/gr798BADYfnGJ3VeCiy8D9n/1Eu
GXf1f6C199DQDwb2/w+N/u8WQYcPrJRzg+73B4vsF4rcfNO8HxKoJ2Ed12RtdFXAwJQ6dj6DjhljaYo0PvA4JXZxZv2XPnIxwnTMHh/qDn5xoh0othxOADjnOHw9TheNio5MZo2cLMQARotpITYyukIdVdWas3x6gP8vPJ4TN5gld7tRK53ExIxuDoXERn484lN2P0iIlwHPsCdpyDIHFisE/KyBDo7zbEfD6BnL4fySjszcO/ABJVsyCZEKM0bBQk0pIEgia9bdkELKEjKaBIIofkm7oCUmCGXFJQJrD+ndkyJOCsp+ACkuelC1Eys5SZwLQUMgQsUYr1dWBZbiMMpAmIg/YXNwIOpCVCbTcegLm/JFJAqHWoqe3XXe6c2JXL0YAqmdEANEmc7YhTEvgTIKc6xuxQkzpdM6lvQJY20RfdCZfMBslskRnwR6heRQ+EnRSGTf4chZoIVhZDizYogAWWigPYyeXEXBhgLxBnbh3tWoGCdCWzz24vAKJzRY9BWuBwwSCgUu6U/y5MCijbpZT/5M9kxkIefz4l0uSf0OheJFLmFwB4K5Vrh98K0RaWQu0zl45kAdbk0/Kao/mdgCWS0/xkKUyFPF9xQ+WzRUelFTtdgEJZWFcQh1QFXLHs8x1tEBig0kll6Mw2J5ll4QlRXJ375rTqPNR7oJTLNMCOXgApa5SjLBrKIIz+QeCwgD+QjC2VHV20RKgeY3q6RWA1M0sKmNasmeSoMAClcSLKwE4uByzXvCnlycw58QLpEqhPIJ/KKaaZl9d9jX/SuNwf/z8zPX5b/EPtrQyRNdZYY4013vf4lq39NdZYY4011lhjjTXWWGONNdZYY43vXrz3TJD5yQav/8c/BkUprh5hQ3Cz4PgB4/YnER/++AX+lx/+Ka6dOrv8ye1P8dc3z/Grrz+AdxH/7ZO/wf/04f+FL3//MQDgL+4+xb/+5mN8/s0l+r/7GB/86w+xfTmje3sCv3yrfXzxJXDSrXL/91/g8fc+wdVHjzE+UxbFvGfMO4c4OIyPN5guBS+uE/BIt+d3FyNiZMTAOIQNJBEkEdjXreTNMOP00AN3HZCA2QFi70/XHk8f3eOj/R34IuHNfoc5OhxnK4eJDuPYqZCpAGly4E3ExX7EbOKmITik2TQzRqd93Gzw+qB1EA8XI7xX4dNNp6UwnYu47lUz5BA6pK3uTI7cIQQrHTHdEwFAeQe32cUsm/9JWSMpWDlMPK8zgGqg5NKQvIOd0NYUqP6L1d3Tt2h2KLOgeT029RTWZmLd+UYgIJH+DSBmTZbWwUdQdlxhDAmZlcmStSjybjklKTobbqqlLoV+L4CbTLCyM/ZEs6Od5wEkEFeFD9GjrRQxrYSmdIFqO9lZopS10FK4tS0jIJurNAvMWEiZIFlUMqp4a5qwKLdpyzSS1x3tOKhGSQ436o53ClRshgsBIUGZS0JIXsxBB8vyIasukKwlwtKUWWjpSRJ1H+I+wvWplLuUtZdrf5LSBcr67KtIJflvZ4LIzFW3x9gOaoONcmxh42QBXFTGRKsFAzYtmWZulXFjTBVn5xSoKS9R9kqh7hBQa5nQsEOsz8bFpRUUXWiAoK7Vto3M5gDyXMjikHw9AABHt2D05PIonVdlMRQtlkLRQmUCUZ0jZG0Nu78kZe0k68wL4t7YbGHpNEPZMtocflrh1yI4C9UXyULL6jKj7KRzzRXxxtBoxWntfYpUNWya4bVnSVKZYECzlvN47VKKCZC0gsmUqJ62iexSxDtMEPE2Z9Zo1j+SRJCj+3ZdlzXWWGONNdZYY43f8XjvQRC5jHj13580kbZwRin//rM3+J+/9xf4tHuND9w9/v30HADw94drPEw9UiS8eXOBf/nmx/gfPvgFfjb8PQDgv9r+Em+f7nCbBvzin38f//t/+TN8+ffX2P76GlefqY3u419cg37594i3twAUFKEXrzD0mqVuO68ZJRPw9AmmT67w8FGPw3MtbTl+uEHaCGgmDDdViDAnjSRAHLa4uAO8uYYAgHg9z+PTDl892+H+n93go8s77LsJ3Av6vWZkgws4RY9D6DEnh5AYHwwP+GDzgMmsIo6xwxQdpuRxP23wzcMOD7eDaowAGB96zF3C5COOYwfvE3abCfe9ZtBBWPslwRhmTNEhpnodQmR4lzAGBWRSZPguloQoJUKcnZbu5MTsnNuUYNao9nfjnlCCBKAqcrkQrLTSBmlLL4CStVCwZApUEm/O2hQAYs8oIqC+Jt3S1aSMnECQIJ293iSZbclHnNQy1I0EHm14VorkpiYpKuUW1hcDWU8kbKUAGQu8KOq4SbSkRrgmbu5E1d0igz5d1aPg+axEIQFxo241db7q51O2Fs2XItR2hQCGJpY8oXxDqXCkusxkUAi+0cQQm6dU54DbMgrU/qojT31NywY0+Y3wSHuAh1jKmlSUmJp+9HcpSFHTTb5m5+qrucYihznMlLUk+RpSTVrN8abMYaq6JPlnFtLU82Etb/Bqt8wTaQmQNSOdlcm4diBNFGyQSmK8EN20kqw87rImWvmUZLa2WeCzvS9Jln3n+WxBENQ5KKUtZyBBvtYAiqDtoj3TcyEDJVvdDPikpSPOnIQyAJf7DqTW1qhrxU21jeSlAHe5pGUBYiSCC1r2sgDhcrmUAaZl/RVV4WUbKghrf59XpuT7t1nTC6HfBjjLpTxCtTRHmjWFBHUYS65+7wRCd1jLYb6r8f9WdrOWyqyxxhprrPE+xHsPgvQu4tMP3xQBTwDoXMRld8KP9t/g2h3wL+9+gl+8/Rifff0BAMD9cgueCNugWgt/dv/7+OwHH+CnT1QA9Z9f/RqfdG/xzN3iv7v8Bf7wx1/iLz/5FH/ywx/ji99Td5jb37vG/otHePTZiP7re+DlG8RXryBnTjQAgG9eo/vVgCcff4irZwqijM8GzDsGR0H/NoCj7XxyZRAkz/APM/gUQFNQ1xtvLI7rLcYnPV7954/xtx8+Aj0bsd2NeHqhoqbf29/g+XAPhiCBwBB8tLnBjpfj6+wpfRaHXx6f4d+8+Rhf31zqGA8dJBLmuQOEMLmEeXaIWQOBBft+wtbP2GxC0TTZee1jig47P+MUPe7nDU5BAaLRmBghOtwdNioIevK6++0E3OmYiJU1IJMhQ6yAA7m0EEIl1t1w5qTrgFORkiCS4oKTQ4QQzac3zA7JHHYwcqm9bwGEok3CTTLq9ffkAdlkHQfTLjDXDQBV24AFcVJ3nTAT+KT9+4OCBRybDf28o23AVwZHUi8mSCrL5MyOiZZ4p83y/dSjilOKJlZpU+ejCJSy1J3mBISLuuusQqz1WJ6bcZql7DvCoc08goCwrayDoo/yTvKsgryEBlxpgiOaxJEWgIh41WagSAjwSoDoKrNKEhlzo52cLFrSjONcVLf83rxX3DvqMakjkAMoSGVXcL2u6lCTk2P9KdQm+MZ+8VTB0DOXFjHb7DS0ujYNtcA0RkhoOdZWuNS+EZAMGLL7rpx/BnKmchNVZgfL0k4WUM2blhFBUvWGBMaWqv23oEnRWMnnklDswalJ6FsQJ
phgYEGNwIpuAygIMKUwbsFuJ3O0/1hC1GlgevVHwcIrGEhwBXQZgOpv6juU1jPbzyOBUCWCT5SqQwKF6AGT6iGX1ncyMfCw9M+XV3wMjpsnRPTimjAXVs3Z6dk+SLOkmXdvEDHmVXRY1kjKVZT/9/8f+HKkEtPWyxR4Py+uW8yO/z8YXMl0nn+KYM7NmhAr7k44n/8l/Qf3hR59VJsj7fB+9j3yemSDjmlghU/1pamKUTDXV56O+F0yQP1cQ5PuxyncexUf/6b+LUrGXosQIbW0JSe/fpz1kH4jU1IwbFNUMzUZwkXjRsjjds6xqTssDS1dTmY4HpUhmrA5cNgs+2p7x8fUZ26sZamuw29RUtUBU4g9Rii9EHCVF0CncjcFuFd0s0p0GaQrb9P2dZv6pZvYsUq4Dpk6z17Pk8/DQcPulwL/w87/Nv3Hx63zZXXFhDHMlXdDH3Z7fbl7j9/eP+PbuHk8PK27riut9xS7Njvu1Q9Wa6oWmeh4pNpGoFJs35DzsH3qijdgbQ3GrOHkvcPaNW9RHT+SczWbsf/g1XvxEyfrLHY/eesHX737El2fPAHCq4736Hn//6Ts8/viC+bcLVh8Gqku5TqYJBKc5nBsOF5p2BW5NL6fRXeybIwGExO+gWxialBazfZhkRSdB/Ae6dE3HM6wqEosIyZ8hhhGtvNU9G0P5QarSx44mlkeO1xUJAUcNTz/rHuS696AIqVEzSLSoEclJsPQgTigjvhQJCk5o88qk/fQpZrWWyFVz0MJUaVSfagMJjAhg2ohuRFJlDgODYPGkRdced7lDHRrY7Ym7fS+pCYecFyylqwo1m8E9YRXFeUkoDH7u5DicJhpQaf09k+jMsHugqC8i3SIKwJFYENVzjVtLalIGiro5tIvhOpXXkgZUn8HhdY+9c+DsROT5le0ok4wlp8is65JdAlm2m4rYaYktJV3/UeoKRWB+uufOcodWkXVdYPQQI1zZDmc8yUWol8rUrfwBJGVKRUJQfbTv2POkbQ1tigaOnRZS1yguF4CTjmLe0HUGf7DiIZTXYVJTPgJo8YMXhWpVYoEoidvt0lcHIkny7GDEkhjGb67srdP7kYy0Mv1jUkXMXmH3GdhLYINWqBB7wKEf2yO5TN/ghwSCuJHHjh3JevrYmxEI8kozHxWEYgANlR+BDwzrjeY4ZecoeSWBIGPQqr8/zbCfPXCW5ZMJUMv72FfG12rVP2/UKF1mLBMStkwcgBRG4E26vrpNxz0GYkbXFeifUa+Wys+6LIcJA1PEl5Enf/MXOTz+bIIg3281gSBSEwgy1Z+mJhBkqqk+HzWBIN+Dqr70KL7xn/07aB1xPQii8F7LTG6efR7PiHZKGrRAH70pM3tDDGu0EKqAmnWU85bCdcyKljdPxAfjfrmh1C0ezU0745PtKc82S26T4WW8KTAbLS/8eVZWDS/zYdXhlk2/36ezAwvX0KU36LqzvLhdUF9VuEtLeamESZFYEqaJdJXi6kcU8S9s+fGHn/Kz5+/zV5a/D8AXTE2lNFopnvrAdSjYhZLbUPGkOwPgWXvCITh+9+Yhv//Ja/DenMXjoaHZP4h0S/GHwAb0reXkXc3FN4QCUH5wieo8/u4J2zcX3Lxt2LzjufuWmLv+M/c/4u3ZcwA+2N/lV59+kRcfn1E9kQ0UVwKwrbsqAAAgAElEQVR4uH3s2TDRQnWVGkwPvhQfkOI2Uqw9xVWD7gK+knVs3ijZ39Xs70fakwQ0jZqXftbVBnQyxo1B9QADr+jsVaeOALFsUJm9E1Rip4wlBF01NFO6JTFKRuMtDk1ksKoHRkAaPF9KwxKK5BFRBnABU8lGrEtMD0WKdYYYNCHLuxojzXSURjnHlJrEjpg9FXZLsY4Um4Bbe9y6xWzkOupnV/ira2JdMy7lBFDTywWqKomrBRhNdIZYDEo8dejAKEJp2b8+o1lp6lMtFP30eKouI7MXHcVVI6auIeJnjm6RPGxCpHi5J5SW5qxg87pl/0BxuCsXJ5y3lIuGxazmLN8rQVMnZtbtoaLuDCEI+NA2Fu8HUCRuLbiIWYgpsm8NSg8ghrGeedVwUtXMXYNO0pQ2mN5EN1eICq0EQNHEPgZ43zm2TUHTGQ6NIwRF29hefme2Gj8PuLOasmqpa0u7d1D/MakSJg7MJhiiZDuFPmgZo6PGWCV2gooJvGNo9gfG0/D/mdFzdHj5WWUT4JfG/Bho0Y0wyLIMLBQQxvIzjch4fEwgxggkMQygTMamx/eDGfYj72dw0M0GACEDksTBoyccydWEsTMAQuoIBeoNpNV4ewNI0gMyOh/wcPzA4O9Ta/H2iRyxxMYMnLE8ZXx+gpX7HSXXrP/OP87INKpj+V+WCkE/DvLvMAB9UDz+z/8Wh48nEOTPoyYQ5B9fEzgy1T9pTUDJVFP94NdnwRPk+65iVHQHByrS1qmhMnFIasjUckYzfRFwAVUkpkEypQvpO6pRmL1GHwxhp6k3joMN3BaB252wKE4Xe+7MdpwVe+6WG+6eb9ielLy4swDgk80pl5s5h11B3Jt+Jj/7C9BqisKzmg2z8LdNiUlvuVYH3rn3En9Hc3WYcXWzYHNTULyUt/b5p5rZy8Dd34nsHy/4xoMv81v33+Z/ePTTAHz9wcf87Om7/NzsXS505MI2GFqc2pHT/jyRUz3j6s6v8g8fnfLfvvmX+LU/epviA6EpmINCd5puEfEnEe7V3JQF7VI+Pzu/z+pba8zzG06eXrP65oLDoxXXX7oLwN955y72ixu+9vATfmz1Kf/WW7/Bi0crvr2Rzz+4ueDpJ6eUnzrKS0BBcwb7u4mpk9IZ6ouI22rsRnPygaZ62eFupWE/ec8ze1mwu7RsH2oOdxR+HvqkhgxwqdoQepBLDRO7eQY4/ZHGMBJdBkFU8mWQplAFYYrEkfFqyCaVAYxRhJS8Id/P4IjMoOtO2DaqSU1dZnUcJE1CjCc1oYj4maykXliUC9iyw9iAc514cI7SSPZ1gdaBEDTWerzXdIkJtT4pRe5zUJidxu4N5lDgNuLtMn9+SnHT4dYN+naP2h2Ih1o8RgC0InYeta/BCLMhzkqiygyFDuqA2R6Yh0ixKnA7N3i7IEye8sUe8+KW7HujC0e2aVCHhtg0GOewzwrKF0vqJxXb+3IOdq+VNGcFl6cz1mcHLk52LIqm9/e4M9/SBoNL6NSmKdm3lkNiiuw6SW7yt4WktMBR49tFS9daWm9oZ5q5a3FamCGlFUQhb8tqTxMsVnm0iqycnKe9dyxcIabKrsAHTV1Y9ilVpmsqdK1pd46yanHOE4Omy0yP2kjiSC8nkedSZgioIqCdJ3hFqBSh08mvYpB5qDzG2yTZyH8nUEQn5oKp6b09joxNc8Of2BH+FVCCBIJEI8CaaeMxAJIXT4lLKsp5HhsVRzve4MhnhRHgksAYYVYNoGZwAhjGEW6UQZxeEhcHBlk0iREVRwsybCPt7BGja8yciY4jmQoALgi7JEZiBqJexbHGwMlYIgSJiSL/EW0k5GXjK98ZfTemBJ04/rwIKJdOXFAisxqB/bo
xg4nvVFNNNdVUU0011Weo9P//IlNNNdVUU0011VRTTTXVVFNNNdVUP/j1uZfDlG9+Ib7+N/79QRdNmjlN/hvqYHoX/XGFuUeVMlUZsySm/1BhbizmAH2CgSElL6RtzDx23jGb1zxYbXhzccXdckOVpjzbYNj4kstmwdP9ipfbOZttRbtLc9+NRi06tJVZxZANGG3mNcPibM/FYofRgRgVbdBcroVpcng5o3xmWL0PbpulFlCfCS62ey3SPGz5whsv+fGLT3mjvGJlDsx1zT0rviZ3zIavuD13jazz427D/7L5Cv/lN/8yANsPTygvtchSqkh7GohFQO9k2rN8oVl9JKap5adreHkN+wPqQuQ27aMLbr404/qHQP3Qhr/81rf42vJj7hiJfvVoPm4u+I2bN/nG8wfs9wV3zza9RKHpDHXruFjs2LeOfePYf7CieqZZPpZlVh/WmF1LtJrto4rNI0NzBu0qJaekWVZTK8xhSNfIs8a9UakTA0ZhhXBMS8+JDJ3MzKtW1iUnfaTxj0MKT5bT6CbJY7o8o51m9UfGkCEZj/ZjMEunksmlL2TsiXQmilzGxH4WWLtAaDW6SEapgDahP48hqDRzrQhe/iao3hvHXhtMg3hApLjeYhOoXqako8ajG58kDGmd7njqWzcdet+CUoR5QXNepmSMxGzaecwuMU06D0oRrRF2CRCbBlWVoDWEdO3KgnAqbJXdoxn7C0N9rjjcibQXHndaU83E1+TBaoMmcl7tesbGrnM83SUT333F9raSY1Z8Z2Rpq0FHVOWp5g2l63BWmB5Fkrs44/tUm33neinMMjFBGm8IUXPwtpfR+KBp0/T8k0/PUXtDVBFz1lAUHTEqmmTuGrbuaIz1vhs5/WXu0fNOIot1IHhDDGDSMyNGhe+0+Ml0yU+mTc+2zDZJEiqzTf4yI08L+QI9CyEbmr5qsCopU2qQfalBZkOSt+heSiNJTXnc61aMdnv4PsthsmwkDvsQVTImHstlHDQnSTZmkpzGxGTOmhk+HMXnqmZI+BmOIUlmRp63Y6lJ3pk4NjbNEeuFl7jjRsPeiAQlDPf84KuS1peYJGrkUxSNyGGiE6khQe7Jsa9J9iXpt61HY1aBMgGdWCkAoRvJ/AAazZO/+YvU7z6e5DB/DjXJYf5sapLNTPVnVX+cjOaf//nf+6dex1RTTfWnq0kO872oCPqgU8JBehk0EGs9JAfkF8Xxi3BQxE6hLPJy6YcXc+U8Ya5RUQ8mg0EaE7tN29hpwtqyrgq264qXp3Mentzy+kwAhnvFmteKW96qXnI5X3B7WvGiXvLxRgCCj5+eEw+G0Dp5QW4UplFIdqU0Jfsby0cnM8ysoyxbFlXD6UIiQ5ezms2dksvlgtkzTfUiUl0HqvcFhFl+qqm/7bi6/zp/5/4D/FJ8JtARN5NlFrOGR6c3fO30Mf/iye/yU6Xhr59+BF/5uwD8d/Of4aP371J96nAbhakN3Vz3+v72JHL7tmJ/r2T2qGD5eEX17gvidTKPvbzm7ifnrD66w9VHS/73H/0JfuXNt/mRe08B+NrJY3589jE/MfuQJ3dP+aQ558JuuGfXAKxDxQf1XQIKpzxOeb716D4fbc/49pN7ANz+wZzlRyWzS48KUNzElA6SrpOVpsRuJO1C/D3EYBWgq1QCQdQouSUQs05Dx74BiWVEmZg8J1L3lIwrc3pESP/uTUMbhamT4WqSvYzjNFU8bmxkp0GHwfjUKiSRI8WJBqN7UA4ELFGK3mMgxpSYk0ASVXmUimgTcWVHWXQU1tOlhqm9n6JyveYQFLfbArU1lC+TLGoPdi8+EFkSpF45BruPlNcddt2g2oCp04cZ9InglwWhcqjWE40WP4QUkYtSNGclKoLZddjLLWp3wGzTeL+dMzuf05yV7O8a9ncsh7uG7Uo8eD583VEUHU0wFNpzp9pyXuzpUpdb2Y5LHdhsK0JQg0HucMqhU8S94RBKauvQOqJN6L2GnJHzWLquN0strOfWJKPhqHo5m1YRowPaeJaFgCS7i4LNpiJuHKExBOtl3ekUNCBmzkkyohrdm/WCALrBRNTMoxUoFYkkUAt5TGkbZBgVHqUDsUhgSAby0v75pabbWXl2BnpDTe1J6UijZ+W4oc+xt2n4Z5PTHgAIAwiSU2ZkuSxJFCAje3uQx1LGLzoZT1GpFLIjiIJuY7+e4BQdUXDvgCyb4s4hgQ6aQeIW9ZHkBw1Hkwej6FmgT/7po46N3Pcmg4w6YEzEWEVrIiEB7Vnmozp1ZFKbn5fje30MkqgiiHdNVCKv6RdATkw6HmXC8B0l4zckM2Cl6KOY8zGak4appppqqqmmmmqqz2JNTJA3vxAf/of/wdELd9ZfhzISioCae5nVa0c5jD5N/dk4gCB5pi/NrFObIW0EICjsLr+kpsYuzdLHmcetGpYL8fg4nR24U215Y36NU565afBRc93KzPb/++SLXN8sCNsUh5C0+bn5MDudtPfCUghV7CM2AebzmkXZ0HSG2/Uc/6Kkem6YPZWdLW8i9iAvzc1CE5ykk6gwSiYpFO0CmtOIeWfDv/YXfpe/fufvYdIB/4PDF/ml5z/Jb7z/Jjwrceu0TznpwaVkE0cf6br8KLJ6LPtYPtmg9imB5GzB4X7F+pFlf1+Ocf+w47W3X/IL99/jndlznPJcdQveKMSzpI2Gj5sLvr27x+vlDa8XN3yl/IQTfeBlEPbK//zyp/nlD96h+3AhCSRbMIdIfZZAkELOoTlII5+vXV9qaEh8Bd6Bn8e+cQkuNX0uQCkafG1D30BHr2RcjaNRwzBmVKdlVr9VmAbsNjFFcmPbIUaJhiEZA7lOeZkhDvOYoNInbpjhOPvPbPIpAfwszeanY7CzDld0fUNcFS0zJ+krp8Weg3fU3vJ8I+e4rh1tbYm16aOnx+axOh1XdRlZPPXMPtmLj8gohtfPLM2ZFRbAq74HQLNQtEthF7hNZP6so3q+x7wUQCyuN2AMalYRThe0FzP29woO6Tpv34BuHvFnHShYXux4eHLbH2MbDD5odq3E34ao6DozfN5YujobzKphOj/5cIAwLmJUaCPXP0SFGTFuAKwNGB0orKewHU4HKiv3g1aRq8OMF1crYlRUs6YHVgCazhKCSua3iq4xhNagEvNKNYpYRli2aBuF1TMGK2BIPlYR7VK092j/MqCjFBz2hRhHw5Ck0ynsrRHWUhzG1DjxpGemuNgDKNnzQ3l6Y1QVVG+eeuT5kdcR6WOwe2PPjt5EeGCEKEz20DECXPqZPHtVBvzsmC2S0pY0wrJITD81BklyRQG381iGBCbm+yUzY1zAloMbsjah995p9o7YmIFt4wfAXZJnRowOGMxulYBKau4HQK5PNxqWGSJ2oySd5V1v9VEs8xGLxESK8wMf/kf/NfUHH09MkD+Hmpggf/Y1sUKm+n6riRky1VT/ZDUxQb4XpSNh7snJHiBgiG4GQETZgDFhMEQEQmroJCkGeVHOb+IWTOmhCMSQZlEjxE7TmdSQpBfnmFIJ1MbQdiVXa9EwXNkVHxYXfHt1l1VVc3
e2YelqllZAgbvzHUZH6uVwCQvrqTtZ/25X0lwXmK1BBYXZKthqwjaZXc4c+1XL/YtbLh68ZHNecPNwxuULAVmK54bqhaW8jtg6YutwTHtPFbUkluwer/gfn/8Mf/Tj9/m3H/59AL5aPubea7e8vfgxfu+N1/nDT+/TPp1h9vk8p3VUnnjR0b4OLx46bp9Ldz5/ekH1MjB73uJuG+bv31J9YgnJ8LM5K7h56wG/9PZ9eLRnPq/ZXM97pooxga41tFcl9qxhNq/5V774DX5++S2+XkpM71sP/g9+7uRt/u4Xfohf++iLHD5csPxAU6yzbEPhUyxtfSqAQJbHAH3UrG5SrGwNplWjRk8YGMEqfGnEsLQc52FmZoc0IrEI/cwxyL8jgBepQjfTfcQtIFGniagUbZo1VkLv19nQN/XkMkM/gCEDbR/Zh5RElG6L/h4wjTBVojKE0hAKyyFLf4Bd5TGlpyxbDgvL3LXMbMvDE2H0+KglKcUbfJTmfxwde9gXNBtHc2rwpcXuSuy2IziNL+VENqeW/R1Fu1JDIgf0sqTmRABL3QjQWJ87ZncM8yeC2M3eVcSrG8Jmi7q8pnw6o/hkxfJUmCDzF3OahaY+l/vvcKfgD19b4E6FhWFtYDmrmbsWowOdN3QjY1lrPQft6JQj7gwqavAQoybk6OxCLpTSyaDSq6MUXmUirQ1oHenKls5rnPV9usxri1sK7TEqUncWazydH4AYm6g/ndcYHekqTRc0+1LOgb9xErXaaXw2EH0lOrV/wmnwQRFSA903z+n6FUWHsZ5YctRYh07jOyWGvd1wn4/lLiqtXwZmGocjeUvefkQA0qMI3jR+o5Vlo0mr8cP3cnT50S02kq3YQ0R3ClMk1kmr8JUkrkACO5QWKU9mgcTRvkF/A6mQ/oxAEJXlOUb1YE70qjewzayMWB6DTqTtR7Sco3h06MeV9kcFRaxfAd5evZhjcGO8jgCq1YO57fixZBTNuvgOGdBUU0011VRTTTXVZ6EmJsjbb8TX/+N/D2BgerQSIQnILHuVpCDjSjNoKgMZSYsPaQawCJgiJBlBIEYIQUsiAwKI5FQZSR85zpoUmrgizAKxDNhFSzVrOJ8LHcHoQGk6lq7G6kChO2mOUuTnpit5sllxebPAHyzUGr0fkm6UT3G7d2vOzraczQ447dm10gRebudsr2eYl47iRmH3wyxsP+vaSnKC20WUh/pUsX4LHvykyFX+zS/8Jn9p/oc4FWij5ld2X+Z/e/ZjfOuppLt0z2boWsv5PWmpFg2zsqFuUyLHbYW6clTPNPOnkcWTjuKmxdwIW0ZvdsR5xeHhit0DR7tQzF8EfJKq+FKaoWIb2F9ofKW4+Yqnen3Lz77xAQBfX33Iqdly4xf85vpNfv3TL1B/85TVu8N18AXU54rmLNLdb+Q61XIdzU5jdgqXGRrN8Wx0cEN8Zabyh5JR0pAafGJ0Yl24FNULEnWbxlFEmszY6AS6ZW8G1adiREPfWObRlGNMc1xpP6ueZ4/zv+NAvc/XV1ZAPzvfN5NmaJiCE+ZLKCJhJrHQtuwoy+QJoiKF9RgdsDr08bFNig7ZN4764Ogag3lScvZNqK4DXaVoF7KPzYmiOY20K9lGv9/5Nk0Mpxz3qw8at1aUL+X7Z+92zD7eYp5fE25uxTdEa1Qp410t5sR5RXc+J2olMbuPDLsH8v32JODvtKzOd9gEfhgdyaSDwnrq1rI7FNSbUsDRzOjJFyKnhIxn9MPoQuWUIQVm1mFMoCg6ipQuc3e+Y2bbPl2mC5rn22W/D0ZL7G7rNS4xTLSKHBLYdH2zIOxy1mze5nD9JVJ3tD+j9JsjOUinUKVHO2G0qLTdXO26EJlXAgbMXg9jrht8RPI4ypGxwBA/myrLXo4YTm36nobsJ3IEsmSAIIN9esSKCkmWlb14jMTvdvOBGRVN8s7JLC51zKCKJibwWo6xP6a8CSPnLhRZipO+Z4+/n9NZYmYP5WdCUH30thqx/F4dNz1gkaOPx9crwjgS91UQp5fbZIBEjQCgtE7VKR7/4t/i8MkUkfvnURMT5HtfEzNkqu+3mpghU031J6uJCfK9qJjADz16EY+qjyXVLahr22vZQV6Sg0GMEFuZ6R9LDaLRBBsJZSTaAC72emydTAh10UFU+OSrELSV5iK/TKeXZ91pYmJwbIqC7VIMABarA3cWO06KA1YFnAoEFKWRt/2FbbhTbvmkOmXTFtzsZkJf38tB6LXF1Ar/vOTyYNmcVJwsDiwKmU2+t9pwOt+zvijZbir2Oysv53bUoXiF6jRmrZk9VxTXkdX78Cw+AOC/2fxzvP/2Xf7V09/m6+U1Xzn9Jn9x9i7/05nE8P6v5Y+y/XiFbhRh7ThEhbOeu8utnJPFjvV5ye3dGYf7JdtHjvLKUb2Uc7D8ZIa92lM+31G81KJjf7mGLnVEhYMQiM6yXFREZ1h9PGN/seJXH34VgP/n4Y8wf7Dl3mpLZVteO1nz3psFh9vEiLkWD5A8W+oqiZmNS9lEu7Q0e0u3M5iDwq7FO8TkdNgm9kBXNmkUYGQ0O2ulEYsafK0ETHAyLvw84L1Hlx5tIuWsJVbDJWgXlrYeGv/eGDIqiUsFaaZGzaywReh9R5QW9lNUI3Am7Vf+PrnHSrKaOJLkmENmvigZ+87gi5JdlcGd1PRpel8ZNWJVZUmGdoHu1HP7tuFwa/AVtMsEJhWBmGRjuvDCpvBDw66UGDtm6UGoPPVC0ySj33ZpmT84YfFkwezTM/TNDrXZEWsZ7/7xp2AMdlahqopiVlE9O2H/UJgim9cMu23JujaoeYctOmZV2/eVhfXMihZrAjsbaFtD8BqfPBeOKnf0OV47VzK3BPDKEpJsprNyzz7xhnnZcFoe0Cqy7xyH1vZ+HTnyuPOaRkesDlgTcClidzZv2EPvQZG9InI8q3Y+yXWEqdL7TOTxhOyv6rSAutlLRA2Gz0rRe1TEUsZiZyM6gYa6lWZftwrVJkBuLBmEwUeJEdDRn7vR5wmswLzSvGcAYASC5PXpVsa1qekBqRz1m8dzDABK/HXHnte9GlLuURUzy0UdMSy0l2V0LTdN9vggr1/L92NiiPQg46tgWKuJmTozHkJKznMM+TiH3xlj0EN5NXoevALkZHaMGi7tEKUr5dYDo22qqaaaaqqppprqs1QTCOIVemuOZyM7oXLntIvMgujNMp14KPhiMLDLL9OQXypVkidoMfJLs+W+ks40zrs+lQFAFZ6I6V9IvZVGWDeyDV0rdGPw6YV302qaxnJ7EKp76SQpIs9SL4uas3LPebXjtNyzdA37hWOfWBbX8zntbYFZG8yNod3PeFGVXM7kTb2ctVRFy7xoKc88h4UMFWc81uSZ8IAPmkNrWZ+tmH9oKa8ECAHYb074pcuf4h+985B/+bXf419f/Q5fKzTz838g+9DO+D93P0x4UWK2mlg7bv2iN9xczWrmZYM9D2yqjvqeZb+16ATk3LycMXtW4TaR8ibgNh2hcpj1YTivdUd3sUD5gIow/2DL/CM4/4NBUrN97YTnD07Zv+5Zv
nnLvfM1T96S89S+sBS3AhDYneJwU9LNO8okuSmqjlB4urmhrUXOEQqNSwa45sAwroI0TrodZmyjpm+E8qx3aIbmQ9eaUGr8zNCVgbhQGOspCrlORdHh55q2NXQHR2iSf0GjpQmDvuHsZ+AzcJBnx/NsehjNDI+an9yIRkO/n+i+Xx/W0WVZkMJqjoCefl0ZSLRDwxUVRAfdSqRAzYOO5o4SP43koxC9FuPGmJr4zGA4as7FDDPaCFUQQ9c0nvcLw+G+ZfvIMnt2wuzFkuplh8usog+fEQ8Hwv6AVhrqBrPZsbw5BcCtT7GHgt3eUp8Z2hNLt7TozOKJimVVU9mORdHgg0iA9q2l6WSsdZ0R8MBrYhQzzq41fRILAQEnkudQDJoOeqC0qS37g2NdCArWtoauMT2gpFWUBJ8gRpdKxaOx4oyHecOegtjpHpjNchZrJbVEZzClEyAnHx9A0FqG8xjE6jRxzMRIhqC29MJeclpAXiAWitAqdA3a0SchvSqzy4DbdyMqZmPdhFWk8Rj78ZWybgYAYESy0w34SkCQPOZ7f53RMRif7oXRd3svkChSswjH28g7GBKw6CFyDEQwWoYuoRnZVypX/jmbmmawLy/SAyDpHGWD4aB6oKX3UwkDQHSkljHJ5yf7jXAMFqko9+R3pCBNNdUPcP3tN3+5/3lihUz1/VD/uKSZiSUy1VTf2/rcgyAqSrRnNOpYfz16GVQ+yktzL3GAUIO1eSZUmrreq0ANL9R9KkeiXYdSvtPNDW0V+uWFis5RxG0sAr5TiVque4kMgNoZmraiuUnU+zyrm75vS89queekqnt2yNw1nFUipzmpDryYL7jVS9TeoGuNOmjirRzEoSrYVx5deVzREbxEZ7bKoNNLeunEHNMZjzs7sO9mRDOYqy4fR8obx+Mnj/iv3rjPr375bf7ag1/jRwvx4/iZ1Xu8/9od/qh9QGwKiaB97tilWeP9vMKWYsLpXEfhOuISSisnt37LcHU9J+4M7trgNhZfglsLi0O38qdbgt0J82H+PFCsPcWVUDXm798w+8TS3Jlx+2bBVXtKfOeW89fEz2JzUrK9qigujZh3PrF0c8PhLDFq5h1aS7OpdMQDdanxpXzutkM6hy8VOstW8qxyAhZCMTDaM2tErrOCHYStwc803d7gKwFdQEAY51JaS9n1jXZbW/w+NZ4bg4mKGEaNX8tRUyapN+JvElNjOWY+5XGOFiZJNBHc8ecZaMkJMNoP687RwjnRQ5rXjMbIPdXUhm4R8Ocdet5Jc56b71ahaoM+JBPKDOb0wGVqTHXEl+BbTSxDLysysw49b2kuNIcHjs21obwqKa9EDnNyXuE2LeblBpRCbffEwwFSUlEZI8Qz7MGxu6dpNpZ2acQjBtjMLM2pZTGrOZ0dWBY1WkUWztCli71rHT7oHuQzOtJ0hibJVbrWiFlplqTkaOKR1KLrNO2mgE4Le0fH3kvC6zggUymq2RstEcdAUXiWVY3Wgba1KBV7wATEQ0drSYfRKqJ1xHt9ZNwaCk1bmF7+EoLGBzMkZwVFJKBSDC9BY4uONvvTBEV0ilAoYdy1Cn0YkoxUUH0qikRCc8SCyAwGPZJ19WMTegCiJ6+kP73kzInPSSjoZSzZ8Lhnguhh/MbMnIKeTqW74Zmf74mo4wBcdoqQE1nyvT4eq4zAk6CE7ZF+lguRFsj7/yobIydOZbaOyec2opQsrLyMBaVBtSOwMT937HBuxudvzCSpz8PRuZ9qqqmmmmqqqab6rNREdp1qqqmmmmqqqaaaaqqppppqqqk+F/W5Z4L0BnrheHY+WIjzSDQKXwh9up8pNCSasRiDKiAgs26Q4kr1MKuW6du6g5gTN3aKUOhkkjdEqYbMDrEBZWXGL3oxX/WjGWHVavRedOe6RWYjzZBw4AvL1dZxO++wrsM5T+U6zmYy7Xla7Fmd19XJFxwAACAASURBVLwLbDcVfuPQez0kguwUca+J1lJXKVVCRdGZp1ndg4vEcUxw5akvFLqRE1G9iFSXgeolNI8dv/P0y/zBVx7wV9/5HQB+ZPYJP3f3PbZtwWN/AdcWXSvctVAL4trgC0c7C6gkH3JFx6oSFseD5Zp2taaLEl3adIYHyw0vdhLN2naGpjNYE1jvSnxtWD8rcLeG2QuhMSyezihuOlSIzF94uvcsN8WK1ReEAXDvbMNu1nBtVkRlKa+VMFaSAW23N3RFGM5BBGaeXuFQ6J65oBP1XwxMB1YRajBTVF6upzlkKcs4gUZhNwpfKfwsMXbmjkPlKRcNznlmZYPREV811HPZx31R0e6NSGQ6kXvZ3choITFEaIaZ9zxmQej2eUa7Z66gxFiXUaqGUhJ7mu6pPJbyjH02Zs2xp4zuD1ODW4sHTqMsfpFkPdlcs1GYWmG3SsxnPUd+BZnKH40wUUwNwRlCkbxVFpq4bHFlh6o6ujPD5oFlt04JNXdL3KZk/nSG2wbKyxpzuUUlzxC2e8pPNWY/x21L6lNNN1M0K1l/N9PUdw1Xq4L9acFyVlMmQ9MsUdMqwujnynYsy9hL1JrO0Poh+qbrDG1rBk+RxCaIjUXVOhmEKmLydkHTe33EJEmJaEJaZ9dFCuMpZp6Ds/094kcUOJNMa3NpHXuDVQBtPcYEnPF0QQvzKEDINLhkBquTkXTwCo0ePGlMShWyCmwgdJpQacJ+xGDI225VnyQ1/Gc6B9kMNT+/81jyavQcT2wJgxiMQj/mgk3EDh2JtToyV+3NgxtZb7sYMZgAe5D99IUSqaOFqIbtqgAaRShke/nZ3t9PkeTBI34h0ShhbaTne8zPkt64+LvQMTIbRKX9SsyomMZXDIDW/T1BOFbUZGNXgkiRVFJh9ZIggHv1d9/2VFN9BmosjYFJHjPV9199N6nMJJGZaqo/u5pAkFT5hRrkpTm6iF8GOuTFWo0iH/PLou7EJyJrr/t0AS3JKz0NOumyxy/aukvNYHp5Dk4RbMS3CQBwOqUT5AYngov9izgxQiPyCu0VMcoLv4658VSEThEOmto4ahfZlp7tXOj/F0vH3dmW11drbsuG9bxivy3oDmlItAJmKI8YbgYgaGnG6+9sTLoq0p14oo0054l+rRXhGqqrLENRbC5P+e+3Yu7717766/z47GN4HX7FvcOHL85prirMJpkopqY/Hgxhq/FlpHOSJAKwXpasqpqla3gw33Cn3PLG7Io2SVXaaNj7gq0veFkvWDcl1xcz9rXjci1eKjdXlvKywq0lOtNtIsv3DWtOANjf23O6PDC72LPXFco7irU04yBRucGoPq0iFJEwl3QggGADodWoVhH8IBUJLtPzJYVCLpo0PrrWvcTE7FUvDTB1km/VirhJMhEn0pvm1FLPPXYm8qGqaJmX4luidaSdG3xnCEEROk23G5ptlQx5zUGN/BEGGY9p6P0HiGCyOWofoDT4I2S5QrCpmUu3VXRxSP4Ix02l7lSfqmN3oLwm3Oq+wZXxnI+dIXJ1NAxDMdwWKKDOqTzpPG0V7U5Tzx16IWBIddHgT5NU5aIg7g271yzlpWH+zDB7UVJc
imeIudmjbre49Q57OWO2qgiFoTmVDbcrw+5GInabU8uL5QxKkUhlQCB7digdMCbSlA2l6/pjKKz47RgdcDpw6CRGOMtlQlAiRas66tLha5PMiY8bfHKTbeWExBym4zU+Kgrje98SrSI+JKAoN7xR4aMafECCImYUQ4feM0SpiNZBjIKzSWqKf3VFJ98LCh9U7+2hdEyASBQ/FeMJJhBsGkzZEwXwQca3erURzyBbO3hd5HPQj98AIctE7OjcaAgJeO7P2QgI79cfxbtH7tUBkAQwh5jASYnw9dVw7HnbBAhdBsMVvoj9eNXt8P8RGe/jpJY+PUaBIBscIxgmJUqpMICJYxlU+lqo5PNo1dH9nD9HPIqHe2kETKIg6Hj8namm+gzX337zlycgZKrv+xoDIxMgMtVU/3Q1gSCkhs8zStAQE1NcQFe+d+If8IdkathpmoMZYkpHL9GhlBny/DKrUuRhfnG3e9U3c3kmXDdDnCJaQJEMpPgyEkcv0jKbmsCT7AcyigyNWl7gVYr6jSYSD5r9ISVNNJb6xHJnvuXObMdJUbOZF/1M9K4uOBxSo9XqYaZSDd4pEgWrekBIN/Iy3y2TseJM0Z4o2pWmvIoUt5HVR4GoJXHj7z34Em+8cclPzT/g7msbvnHyOt+4fsCTawEgDusStTUSQ3tQKYVEw6UM211Rsl5IJKurOs5XOz4szzkthO1idaALmpeHBY037BuXwIEGc7oBoH2oWe8rbm8r1KVj8bGmuIms3k3xresFLx4UVCc18/M9O69QwfXXye4gNyrRSEPUtYruTmp8KwGGojLg8+y06n0coolQJvOBhCIEHeU4gWg0KIU50Ptq6A7UIY9dGSvtxtDNDN3ccagCh1WHS+at1nqc85RFJ31OVDQz25te+laim8M8RSgngCIzOdxGy78TgJOvt0mfx7z7IY0zk3xzehPUKA1ZnrXODI6eCaLQOzGTtVsxk3VrOa2DHwN9xDB5Vj03h9CDRrI++v3NY9XUwioIpaabGepTT3fSMJsJ0+P+3VtCVFyfzlhfVTRnhsO5o7qUFVdXFfP3FDy/hM0G/UyjjcYtxH8mrBaUlwt2DxzNiaKbW7q5lfjgDNDkPt9FOhs5lBWq8r1BsnXCsjA6MC8btIr93wB1a6mKlvP5nsPKsqsLtvuCdp/Q16BS0kq6V7PnRJ9ABbf7isKKh0x+juVUmeHCiOFx9gIJYfAVyWBGNwJJrPO9uWqwGm0Czok/TasYjF/zRc8EhzbR5RSoHEHuSKatidGW75fRWMkMoZzKFRl6/7HvjQnpqyMwLeqBpRH1MYuk98VIP+dbMt/rus0LDuOsB7bj8PtDQMtIbOT53JEA4cyYSr9veoZVM2xL9kMRrRrGdDxmPaEFfIn5eo3MT6M9Xk6OWZJoNLr/Jaa8AOiqU8fsj/5+ioS1+w7D2qmmmmqqqaaaaqrPQk0gSJTEgDH9X6ekicZYggbtPMrEfgaUqOTFX0XCUtMsLe1eKOogTAkBB9KLrIkEE4YZQYSirLxK4EGK4g1DtCqAUQKsRAWhkO/G1O/4IiYJTAQzvND3s+Nx9H/QxyXGBHL4TvGylUbnYrZjZlsWrqZKJqoHb9m2JZu2YFcLONK2wibo8oxlBkcaLaBMbnLLNFPsAt0ZtOeW/dpQvRjAEIAPPrzL/zX/Yf6lO7/Hj1Yf8075lB9b3OPx3XMAPtxf8Hh7ysfPz2kvS+xawBC7TdfJK2FCFDKML+cLXlroVumgS7moaiufq04Rlh269CxXgiJcLHacnl1zWDqerxZszIrVexq3TvT/RlHvC/YPNeWdPXrR0S0NNrFVxikTMUjjrw1kPYxSHkyQpgad4kjjkLqgYjIAlZ9jACzE1H14kxooJ4agPQDRj9WIOcgYLhII4wtDt9C0yySzmAcB9Ao5L0rLtkLax+h130nGvC9a0o/kC+k+abPU5LhxyuNOd4ipZyvH2DOjojRp/RgZU/ll1dLsak1Uugd8ogZfpmVSnGdw0hSPo1BhYKn0jWkCBfvGNpm+mkbYW93W0m41m/OUenRXIm6z/Ol2OaM5L9it5RwVN47TxQUnf+TQz6+J2y1hvUNtZDCqm1tm2zPc5pTmrKSba+qVppuPzqOcdnwhzwVfGkJpJeYVqIsEdLrAtqqOkl3kPCZQSnsWs0YSnyrHeiYnqekMXWfEYDXH4MZ0TRBpym5bslcl1omhr7X+yPtSZ3aHDuioCFERkoEqCCAC4FN6jdKBsvRYO+6k02XWwvYIIQ4pL4oBXOl0pkT0+ItKYyQGJT+7hHZlgCA/e6wABTolKYX0XFWdyKaC54jh0BMk2uE6BNPjjn1MMAhYlI17YwPKx4EJA306WNSqB+rGYEoej/JMj70ptlbDgM/pN/KzOnbnCgJmRBMHbNQMQEhUStQyUfXpMeNtAwODsL8YAshnxkxmeKhXWSY9yA7FpZkicqf6XNWUHjPVD1L9cckyE0Nkqqn+ZPW5B0FUpE/s6L0LsvTAa7ra4WeG4KI0aqlaGzBFkEal6vDO09VyOv3eYm+MsDvalDyTmr48sy2z5ZHYpcbZyrJ51nE8405IzZtWfWPpK/FfiCnCUalXXsK70Sx6Si8Yywv0QRO947qYE6NiUTQsi5rTQsCBhW24U+6og+HgHQfvaLzpZ48B2mBSDKij9ZpD46hr1zdVxnqsDbCq6S4024uS+oWjuE7N3DPHb1Rv0gXNX7nzh/xo9TFfrT7ip6v3AbhdVVyHOb968SV+5dnbPPn0nPjC9QwFewCzjxQR3DY1KgqaVfLrmIMvBhmJ6sBXBcHB9lRiRm/PlhQnNVXZohSYewf2+xnxeZK77KG8gqgtB1uhSk+sIqFOjZxLXjCJqeCrRLVPY8k3kqqDBpykZvTAB6Nmr+/yUmOT0yxsxFtFqERSoxJDw6TG2haKuAHTxASoRawBt1YUiabvyxTTnHxHRKoiM+Uwws3U0DyFEesoFHEA1dJ6jpq+1KkFPUi+dKv6CFydGAoZBIkj8E7uhbTNKtKqgC/F9yTqEZDikn+ClXPVJ9L4ofnN1zjLbpQfGCLjdBrdQrEG02jaJK26Citu5x2rxaGPZd7NG7ouMaMOlua8pD49ZfnpguKywb7coHZyv8T9nni7xoaAWc8J8wJ3WlBfONpZuud8ZksIm8qXMn58SowKLjfYkZC8cJoqoFJMsAAKik+DFjaTisxci1uktKTO0nhD3VrqwtLWtgdAcoVakly8tmAD2obeywhA69AzUiQ9RuJ8MwADMcXoRqIKfTrUODa7bi2tl5QiY70AIDnVKo2J/p7oAD8YKAmwII19JIEDr3b4CvEWQQCLaEcpJkHha53GYY4XB5IsKkdWK5/sRfJ41P0i+DzGCzAu+fMoCFlaZemZJPm+EFbF8U6aOvYgu7EQ8i2eo2gVA+gwukw6Aj4Dh/1p79lj2ftFRwapz/FlHr6Tt2WiSCbz9rz4+vSgpxotj/xOKm7VMbNkqqmmmmqqqaaa6jNS0ysO2ZRyBBAkI8riRij/vjQiTckNVcyNHLSzQFx2mNJj00y
715G4lchZk2QL0ahhJpv0/VHEYijSzF/SsedmMhp6HwnlwYzM+3Ksb+89MipzoG98QxEJVvUNqCwQUY2i3RRcR8WudOwqR5dmepeuZuVqSu2ZmZYuGGzKPHUJLQppo20wbLuCy3rOuilZH2RmuusMSomx4mreMitbtvOS/VwAiPK5IT6u+K3DF3nxhSV/8e4dvjJ/wlvuOQAn+sCX3RU/5J7xzuw5//fyK/yjxUP2C+kqu2uN3ShsYkK4fTyib+sO2oUct24FIChv5FxmhkE7d7Qnjt0q4qsAJx3xzIt8BXAbhdnL+dRrQwgIHT3N3rdpO7mRCmVqfBIrKMYEgCWjW+0GTwVIM+vJeFLpiPeJ6ZBnrgFVCpsidlpYE42mS+azba1wMy37WZMavIhp4sAqSg2OytG02bOj93lgFPOsEguEvgHq5nFo0lTsx/4AgmT2SJSGMct20vqjjim+NuElmbqfwQ8rYzSmP76I+AVHy6BBNaP9TU10P7Odmj1lFCrGo/tHVpR2P2RpTGLPXOWZ8QJfOa7PHbtTAcXmZYudJ0+Q08j6tOTl2YrbS0t56Zg9nzN7KfdE+bLGPr+FQ4O63WL2NWUzQ4U55iT5/GTzTCXnKziFd68Ao1ZYX8GCrwy+0n0csp8HvLE0seK2EGbPfFGzTEbBMapeSjMrWnaFo23tIGUJii4ibLBOgTcCivTXUfCINrGGVDJl7scQA4jhnMekh9E4QjdLdwRAMdgEgsQw3A/9cqUXP51OjyJ2EXAsS3oUYr48fr4pBtDERChCL59RKhLn4nujtOy/2pneeDVq1UvK8rryM9hnAKCM/TUJxcjwNB1msPSGxj12GUCN2CS+BFOoZGgcsXtFSNsMLgOO9NHYPUtKTg69P8d3AzfSvqgEZJDieWOWX4IgKZlx5ROYVIT+evq0DTFHTed8xCSMJgoAqON32fpUU33261XjVJjYIVP9YNTkGzLVVH+y+tyDIFHJ7D0j6UgGIuwB2GcjO9W/8I5foMWUUtMtA36emgIXiGXEd+LAn+U2MHiC5JfgmMCVaBAwJIEkKlGso1cy+67zDHvadJ71TvtxBHAAdi/7LpKCkXFnzyRJjexe0wWHbwxN7djVYmAwK1qWZU1pOpauRqvIwop/Qsdx06NVYGZaLsodVofeaHHtJT3Ce0VnpSEqqxZ/njwKbivsXmEeF3x8uM/T6xW/efEFHi1uAHhQ3vL15Qd8tXzMPzv7Fo9eu+KX5z/ENx88AODxzSk313NYO+xaYzf6CASJGtoTASzMTjxFZs8ibgtFkrtUV4HwRIl/ydywf03TXAT8IgE9hXzP7BP1vtREO/q8UsLQyGMnNfniFQJxr9LsvibOPD4oAaRyBYV2AWUCNiVveK/7GfwYFcZ6mXmPCq3l816e4A2HhaO7NthDYmJ0AgyZQ2JitGBaYYvkmeu+GUfG9nhGHC2NXpZxqFYN9PyUNHEEgiDbDIUYKeqRmWlev8hbpKHMKTG54wpWPHS6efb8GJnFjnuwmNfznY3ZUSKIjumavNJB6pikZcLGMHW6T4DyEqJVtDtHszHcnnSYeUdVyQ23rGrurzasqprt6wXrzYztdUHxMnmGPF+w+rhi9qzGrA+oQ4vaNxQvwdSCuPnSjEw2Y5Jx6P55E+zIJNnQp490Mzm4bqHxlRjM+jLiK8tmYdktZP1aR1zR4axIe5wRqUvnBwBC6Yi3sTdVPTJWDYmF0RpCo+nlbXAs1wDiosU4GZdNZ3sgpPMaHzRGRToV0TpiTESlpJyuk7SaflwbCNaIPwgQu8SEIhDRic0xPHujjmmfRhKSoIZmXYsHTrAhATCKrjB0VTKXLQz6oPp7JQPIwojI4NwAcovsSvWeN3mMyfN42G7UQzpYNMkwtVG4LZQ1mDqiU9BQcHKd/WwAqaN9ZQxn1l5mi8TR830g5aAzo8OFHmzN50HpiDKhNwcWWUw6xpSOEyMyBlwQeePoOrd/DAAz1VRTTTXVVFNN9YNek+J3qqmmmmqqqaaaaqqppppqqqmm+lzU554JghpkKeOZvDx71seC1kM6QJ/gEsFpMVpsF4Z2lWdsI7EIdCeeUOhRzOmQIKO80PFjSjmQmcBhhrun8SemSrfIMgvV74Nu1RA1mo6jp/2nuFHdxpSSMfgOgMxSdjMgaoKX+Nau1n1E7s6W3LhZH7daWE9pfC+XyWV1oDSdxGWqSBtMnzZhTBCjxsbSHWSWWaVZznye7BbMTlE9NXSbBR88n/F+cV+OofL88p0v8QuvvccvrP6IHy6e8tbFSx6fngLw+MEFv797yLubu1zu56wPJSEqmkaOIUY4WRxYlQ0vNgsO+4JuWeHWCreW81FeRcrbQLHxBKewe8O2NbSLJNUoI34mMhvtITTCitALGQQxKEJt0Dsj3i0pZtYcMq88yUyswtdq8N3IH2vwlSdUiliCteKzkOUnwWtiZ3qPhhgVRdHhkhQhRsXWljS2wO+NzJy3iq6ReF1IRqrJnyaPFTFkHLEtRpKZzFbI+2ga8TOIQUEzMKbGZRqZDTeHkWwgjeHgEHZJnmHP8oIsP/NArTBaxm10iREQh9l3kQfI/ROgn9HujX9jmrnPLBIN0cchXjXfWkmSFnUUqUO6L4o16CSrsntNs3P4mWU7E2bUtpxxe3rgdLHnYrFjVdU054ab+yLNunlYsn/gmD+ZM3tRUV51lC8P6F2DvRJakCks0RmIEb1OOjlr5P+A6ExiqhgwimB1kikl6dNCU59qgou0S2Ev+b3Cb5JcxkW6omBfenalx1ifYmqTSa8OWBsoio5QqcTS0oQmbT+xAuh0YiLIWOojjVPpVtE1ijaZuDZOvEVA7nmtI0UyXlVKzFeLxARpjKWuhcEky0eM6fApDanDokwAB0RPaJNcJ1/HFCPN2Ouk1cJiIF//IMcSFdqITDE6GShdaekOhrA1mHp0Pyh6047+XoeenRc1RyyznqmRGIKZ/SH7kCWIiRW2VSJPS0wQ3cU+IldYVQqfk8Bg2FZm+KXDHhsJ90cfhNGhQo5JzwsNejqV0mHGrKrsBROjbN/YIGqY7EXkFbEarW+qqaaajFOn+oGr72aeOklkpppKagJBohhNjt/yhZ4f8VH1fhx6pCPPNOWopGlza0nocBt5YWyXisM9CAtPOOkIMw1BzEh1op7rJqVs5PVmoKLXZEPWhccSiRjVkTDLHgapQVFDIxjscBjdQuFuFSYlz+hWvCJs6r10ikbUjcK34Dsj0pyMcehIayytKdgVg4FifCUyUWmk0bD/H3tv9mtLlt95fdYQ0x7OfMe8NytrcrmqXGW3bcnubkxLqNUPYHhA6id45YEnEEL8BYhHwysCBDwYCfGERAt1I4SQbRncXaZs1+CqzKqc807nnmHPEbEGHn5rReyTbXc3UttUZe6fdPPePHuIiBUr4sTvu75DoLAeo8Nokpjo8NpEfK+hN3fUDaqIAsQgQEixUNhV1gZBNAU3H1f8zy+O+bM3H/P3H3+H3579iC8W1zLO1Wt+s/kpb8/vc+MnbEKFR7FJhh99NMzNjkr3fN
Cec9VN+bPTRyw2NcuVvGf9uqB5ZqiuNcUmUq4j7rUkTAC4iaTymF2aC07hAng7NhlkaZJX2I0a0mLyXIkp9cfsMgg2+gxEK1HCvjb0E4tr3OC7AEiDGhQ+x1joiK48zUQ6qsJIgkicKnoTh8Se4BRuui+7yT4Co99AnivZMFQ52T/fJBNSk0E/lVJZxCMnpxgNIIaLQ6KLyTIPM0Z8ir9Ikng08v+hjINcJScgKa/QMULaD+KYeiFgQGoenZL+MAGUYyVz3BhHj4X9FBs3Hnu04t0zvBbEv8HsxAy3jIqwlvQhkEa4XVien1TUs46m6phVHY9PRbq1mRXcHE+4uV+zurbUr0smzwsmL3qqK/HsUJ1Dtb2Yud4soG2JPqBMAimsBaWxZQFagxXABJ/jYy3uYk5/UrE9t7QnYiDrkgFuqCQ1KhQGXwdcGe42sSbiG0fddEyqnlD2aAU+AQpGR5zXbNsC50SiEnoNTkx5gQEUMRsNuyTnsRFf5TQjkWD4UlPVvXiY6kBTuDRf5X3bjREAEZHx5JjgEGSiFoUfpF8xqiHOOZd3WvwtnIZejWBXr6DTA1jmi4iaOGxK2SnrHm8DrgiErRHvnqyk6UfQUDmFsskb5NPqq3QtZS8geX28XkDmsgBtci8OVmG3CWRJRtxmF0VqaRIYl+VppF8D6R/qzg8Z37MHXCoHOmrCcONRxBiIMXujaAG4ht9zmlHrxCCvG74/iFSKT93vD3WoQx3qUIc61KE+C/W5B0GUh3KRAIj0nC2pDZFQRJRWyRNkT5OdmCPRMEQhajcyRUwH0WhaIBw5SOkOQY8JAarThFKSC+w2ReX6vZ4lrwImkEMlBsKQGlIHYpX086lhRMfR49BE3JER4GUnTb1p1WDUmhs+3SXz193oEzLsQDb7zKawew1prqgiwUBvIrvaY+ueJvkoGC26fK0DLSVeIQ1KFptHiGXEKdmOSeMwrO73klBQ3pS8f/WY/2pbw5fhtybvADDXkcfGU5TP2ezFGOxDLbto0Srw1fI5u1jw9ekzNqHkNqEvH25P+d6LR7y+nFC8tjQvpOvIbB+7AqsUdgvKiSeGWwpTAEaDRNOSYpEZ4l3lPDD4YAgbI68Ej/4DdqPwpRITzKlOHgGpYYoK1Y1eHwoIhWU9le2ricPYgDYBXQSJzQzy/bHK81Ve00Zej0GhdRz6m9BrMadMAImd9/jOjLGmWyPNWK8IG40pEmiwl6YkTWEcGkMVJPkISMBf8ozYQTdX9HNFsHtNY9x7bxy/c1jYtuDt3nvD6D2S51LQCuMTe8Zw19RRjSvsAgQJqJM9ePo5hDZFmTr5PtOOwGfsFHarcOuSblKwm9Ys5z0nR8LysMZz72jFbrJjd9+yXtZsHlc0zwUMAahvPHblMX2gCAG12sJySexlI3G7QylFNOki1Bq8J3YCeEXn0C+mNKcnlPdO2D2e0B4Z+skIvoYqma1WGl9lM9A0BjoSOs3WCbBQWM+sbikTq2hayHZab2mdpQ+atk/MsJ0wYrpdQb+xMhe75JXh1OBrQorO7XcGpSNV1QuDKU9FHZhWkrojLJURCAEBVEF8PbSKlCl61+0Zq5rkO+ScxvUWv7bE4Z4xevSYXfLe2Gn6ZC5rpk4ScCqPQ+7JAmqo0f6kV+hAMuLdm0P7czT9/wDEfSoyegQ3xOtGleIvBAzeIMVGQBCzy3Nzj4GVgJZgGS6EuJ8Ok+/B2RDYqZROMzJi8IY4xF6nyPYMtITRhBsd5fqP8nOZTBqVrvtDHepQ/3T9Rcapn64DW+RQP4v1l0Xr/vPqwCA51GetDiBIgPJWVo1zIoDyuSll+LmvuWMwNyS3hPRQq8T8DmRVrroBFTStL/BTPzysDkyPMuCNrBCqIA/LJuwxCGKSExh56C6vJaY3p5KEOo4rj5kuHhhAEl16dN0TvMH3GtdqVKcxa3lILtYCiNiNGGZarXATNcbqwmgOmCN+IwMwAgmgyeaWBoJXeBNwiXquVcRqYZFoHfGVpt0VhLVsRLVaHtDT+KIguHEMooJyKSajsw80q/ac/zL8Fj944zEAT+srvlV/xES3hKjRKrAJFRMtK+8hahahplSeWvWcmzX3zZJ57kIATeRPLt7ge9snfO/2Md/76DF+WWAXqWnaqkFWZHZg+kC8hSrF/Ppa4as0TmoEy0ICIIIR00MVRpAns0zkDYl90ELYgtlJnK2v03kucuMuYJXsqVkO1QAAIABJREFUuiIsk0RiJqa85qgbjRDVXjOFUN/LylEWbpAqNWWPSQ2Rj4rOWUKUlfqzZsPVdsKmk+a9nRSSLNIbXGNwTuGmepBmFcsEHLkU8Zzmsd2N14PpIsVWTGlNYkTlaNg8dgOraX/Beo8ZNQB0Ia9+j+MoYEWSLWl1d96yD+4l1ktitAySnTIxWOzIkroj+0n7VqzEKNftFH5juEzyMVM7ppOW0nrOJlvOJls2ZwU3DyZsnkoaUnlpKZYW00KxqilXgfrKYbYCgphFi/Jelvh7h3IeegdOXg+LJWGzIWx36NsFk+UF1dmM/kQm2+7U0k+FGeImCt2MDBoZAwEIQlew7TVt7eh6S5lYGrvKUhlPYTyN7ZnpgG4iE9ux6OQYNn3Jzlm2XcFuWxKSHGwop1BB7jV9NiMtPJ1L46QDTdlTlz0uaPreoPYuB5vuFQAhKrQSRl5OlcnAXFX0VAXsTGAXkSYeiFYPkbzKG5E3LRUhSeR8p/FlgCLTjyIYAXPzdeODTkDcOD9hvC/JDxNwsX9PV+P7dJdNbiXtiEpYUCDfKyC6GuK7dX/3+zMIGAphVcmxjfsTTJR7cU42SwynHB8etUogYE4kIkln0us+Aa2KEW3cPxif2HCfjic+1KEOdahDHepQh/oM1AEEicKKIILKmu60ABhKhsa/n0ZIsZ3o8QFZOTCpQRuTH0B3kXKh0E7RbyzRCmMiP1DnB9BowFepUY5qXHkO8gAck09CsRW2Sm4cfT0mCgy+IIm1AeB7TZz1KBPRpYciEAO4WdLebw12aaiuxB/DdHGI3YWx6cz7HI3aA0X2GssxjIDgFPsrhz4KRGKVNN0AhfVsjawq94uUD+lHxgnF3up/EQmVxOBW1zD5RLHWp/wv198G4Ph0zdcuvsBXpq84TnEsL7sj+jTIy74moJiajqf1FV8oL3lavKZWnrR4ziM74yvFJX938hFXp/C/3ftFfrh+zJ9eCdByuZyyvpzQvzJUNwq7ltSg3OBn5oprFJh0zuzIMJDYUzlXvo5Dc6XSec7sHJEr7bFF0vxyE6HUZ28C1StJeknxt8ppeq9wpcXUDl0kWUG315gidPcQFVZFmsJxVO2YpLQfrSIuaGrjqIyjD4bWS6MLMKk7XNCUlYMZQ8RvSKvzu22JNp4YtMgUWgO9GpJT7FZ8EeJ1pFwFqkVI0dMJQCuyX4hK18PIsBl9EsbYW9J1kscQ5BoeQBE1fv7T8bO5qRMZmho8EHxiPoVS5p1KbJB9Oc7g6ZP+rR34XsbI14ZFU6BKz2rSM2taZlXL6f0t3YWMw+XjKdtNlZg3CrU12FWJ3
[... base64-encoded PNG output elided: crossline section and label plot produced by the plot_aline call below ...]\n",
+      "text/plain": [
+       "<Figure>"
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "x_cr = train_seismic[:, idx, :].swapaxes(0, 1)\n", + "x_crl = train_labels[:, idx, :].swapaxes(0, 1)\n", + "\n", + "plot_aline(x_cr, x_crl, xlabel=\"crossline\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Model training" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2019-12-09 04:08:24,354 - cv_lib.utils - INFO - logging.conf configuration file was loaded.\n" + ] + } + ], + "source": [ + "# Set up logging\n", + "load_log_configuration(config.LOG_CONFIG)\n", + "logger = logging.getLogger(__name__)\n", + "logger.debug(config.WORKERS)\n", + "torch.backends.cudnn.benchmark = config.CUDNN.BENCHMARK\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Set up data augmentation\n", + "\n", + "Let's define our data augmentation pipeline, which includes basic transformations, such as _data normalization, resizing, and padding_ if necessary. " + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "# Setup Augmentations\n", + "basic_aug = Compose(\n", + " [\n", + " Normalize(\n", + " mean=(config.TRAIN.MEAN,), std=(config.TRAIN.STD,), max_pixel_value=1\n", + " ),\n", + " Resize(\n", + " config.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT,\n", + " config.TRAIN.AUGMENTATIONS.RESIZE.WIDTH,\n", + " always_apply=True,\n", + " ),\n", + " PadIfNeeded(\n", + " min_height=config.TRAIN.AUGMENTATIONS.PAD.HEIGHT,\n", + " min_width=config.TRAIN.AUGMENTATIONS.PAD.WIDTH,\n", + " border_mode=cv2.BORDER_CONSTANT,\n", + " always_apply=True,\n", + " mask_value=255,\n", + " ),\n", + " ]\n", + ")\n", + "\n", + "if config.TRAIN.AUGMENTATION:\n", + " train_aug = Compose([basic_aug, HorizontalFlip(p=0.5)])\n", + "else:\n", + " train_aug = basic_aug" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load the data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For training the model, we will use a patch-based approach. Rather than using entire sections (crosslines or inlines) of the data, we extract a large number of small patches from the sections, and use the patches as our data. This allows us to generate larger set of images for training, but is also a more feasible approach for large seismic volumes. \n", + "\n", + "We are using a custom patch data loader from our __`deepseismic_interpretation`__ library for generating and loading patches from seismic section data." 
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "This no longer pads the volume\n"
+     ]
+    }
+   ],
+   "source": [
+    "TrainPatchLoader = get_patch_loader(config)\n",
+    "\n",
+    "train_set = TrainPatchLoader(\n",
+    "    dataset_root,\n",
+    "    split=\"train\",\n",
+    "    is_transform=True,\n",
+    "    stride=config.TRAIN.STRIDE,\n",
+    "    patch_size=config.TRAIN.PATCH_SIZE,\n",
+    "    augmentations=train_aug,\n",
+    ")\n",
+    "\n",
+    "train_loader = data.DataLoader(\n",
+    "    train_set,\n",
+    "    batch_size=config.TRAIN.BATCH_SIZE_PER_GPU,\n",
+    "    num_workers=config.WORKERS,\n",
+    "    shuffle=True,\n",
+    ")\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Set up model training"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, let's define a model to train, an optimization algorithm, and a loss function.\n",
+    "\n",
+    "Note that the model is loaded from our __`cv_lib`__ library, using the name of the model as specified in the configuration file. To load a different model, either change the `MODEL.NAME` field in the configuration file, or create a new configuration file corresponding to the model you wish to train."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# load a model\n",
+    "model = getattr(models, config.MODEL.NAME).get_seg_model(config)\n",
+    "\n",
+    "# Send to GPU if available\n",
+    "model = model.to(device)\n",
+    "\n",
+    "# SGD optimizer\n",
+    "optimizer = torch.optim.SGD(\n",
+    "    model.parameters(),\n",
+    "    lr=config.TRAIN.MAX_LR,\n",
+    "    momentum=config.TRAIN.MOMENTUM,\n",
+    "    weight_decay=config.TRAIN.WEIGHT_DECAY,\n",
+    ")\n",
+    "\n",
+    "# learning rate scheduler\n",
+    "scheduler_step = max_epochs // max_snapshots\n",
+    "snapshot_duration = scheduler_step * len(train_loader)\n",
+    "scheduler = CosineAnnealingScheduler(\n",
+    "    optimizer, \"lr\", config.TRAIN.MAX_LR, config.TRAIN.MIN_LR, snapshot_duration\n",
+    ")\n",
+    "\n",
+    "# weights are inversely proportional to the frequency of the classes in the training set\n",
+    "class_weights = torch.tensor(\n",
+    "    config.DATASET.CLASS_WEIGHTS, device=device, requires_grad=False\n",
+    ")\n",
+    "\n",
+    "# loss function\n",
+    "criterion = torch.nn.CrossEntropyLoss(\n",
+    "    weight=class_weights, ignore_index=255, reduction=\"mean\"\n",
+    ")"
+   ]
+  },
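+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As an aside, here is a minimal sketch of how such inverse-frequency weights could be derived from label counts (illustrative only; the values in `config.DATASET.CLASS_WEIGHTS` are supplied with the repository, and `pixel_counts` below is a made-up example):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "\n",
+    "# hypothetical per-class pixel counts, e.g. from np.bincount(train_labels.ravel())\n",
+    "pixel_counts = np.array([260000, 120000, 310000, 150000, 40000, 9000])\n",
+    "\n",
+    "# inverse-frequency weights, normalized so a uniform class distribution gives weight 1\n",
+    "weights = pixel_counts.sum() / (len(pixel_counts) * pixel_counts)\n",
+    "print(weights.round(3))  # the rarest class receives the largest weight\n"
+   ]
+  },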
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Training the model"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We use the [ignite](https://pytorch.org/ignite/index.html) framework to create training and validation loops in our codebase. Ignite provides an easy way to create compact training/validation loops without too much boilerplate code.\n",
+    "\n",
+    "In this notebook, we demonstrate the use of ignite on the training loop only. We create a training engine `trainer` that loops multiple times over the training dataset and updates model parameters. In addition, we attach handlers to the trainer through ignite's event system, which lets us interact with the engine at each stage of the run, e.g., when the trainer or an epoch starts or completes.\n",
+    "\n",
+    "In the cell below, we use event handlers to add the following behavior to the training loop:\n",
+    "- log training output,\n",
+    "- log and schedule the learning rate, and\n",
+    "- periodically save the model to disk."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       ""
+      ]
+     },
+     "execution_count": 13,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# create training engine\n",
+    "trainer = create_supervised_trainer(\n",
+    "    model, optimizer, criterion, prepare_batch, device=device\n",
+    ")\n",
+    "\n",
+    "# add learning rate scheduler\n",
+    "trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)\n",
+    "\n",
+    "# add logging of training output\n",
+    "trainer.add_event_handler(\n",
+    "    Events.ITERATION_COMPLETED,\n",
+    "    logging_handlers.log_training_output(log_interval=config.PRINT_FREQ),\n",
+    ")\n",
+    "\n",
+    "# add logging of learning rate\n",
+    "trainer.add_event_handler(Events.EPOCH_STARTED, logging_handlers.log_lr(optimizer))\n",
+    "\n",
+    "# add model checkpointing\n",
+    "output_dir = path.join(config.OUTPUT_DIR, config.TRAIN.MODEL_DIR)\n",
+    "checkpoint_handler = ModelCheckpoint(\n",
+    "    output_dir, \"model\", save_interval=1, n_saved=10, create_dir=True, require_empty=False\n",
+    ")\n",
+    "trainer.add_event_handler(\n",
+    "    Events.EPOCH_COMPLETED, checkpoint_handler, {config.MODEL.NAME: model}\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Limit the number of data points if we happen to be running in test mode."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if max_iterations is not None:\n",
+    "    train_loader = take(max_iterations, train_loader)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Start the training engine run."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "2019-12-09 04:08:27,209 - ignite.engine.engine.Engine - INFO - Engine run starting with max_epochs=1.\n",
+      "2019-12-09 04:08:27,211 - cv_lib.event_handlers.logging_handlers - INFO - lr - [0.02]\n",
+      "2019-12-09 04:08:31,265 - ignite.engine.engine.Engine - INFO - Epoch[1] Complete. Time taken: 00:00:04\n",
+      "2019-12-09 04:08:32,319 - ignite.engine.engine.Engine - INFO - Engine run complete. Time taken 00:00:05\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "State:\n",
+       "\toutput: \n",
+       "\tbatch: \n",
+       "\tdataloader: \n",
+       "\tmax_epochs: 1\n",
+       "\tmetrics: \n",
+       "\titeration: 3\n",
+       "\tepoch: 1"
+      ]
+     },
+     "execution_count": 15,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "trainer.run(train_loader, max_epochs=max_epochs)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Evaluation"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We will next evaluate the performance of the model by looking at how well it predicts facies labels on samples from the test set.\n",
+    "\n",
+    "We will use the following evaluation metrics:\n",
+    "\n",
+    "- Pixel Accuracy (PA)\n",
+    "- Class Accuracy (CA)\n",
+    "- Mean Class Accuracy (MCA)\n",
+    "- Frequency Weighted Intersection-over-Union (FW IoU)\n",
+    "- Mean IoU (MIoU)"
+   ]
+  },
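+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "All of these metrics can be read off a single confusion matrix. The following sketch shows the common definitions (for illustration only; the actual computation in this notebook is done by the `runningScore` class used below, and `segmentation_scores` is a made-up name):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "\n",
+    "def segmentation_scores(conf):\n",
+    "    # conf[i, j] = number of pixels of true class i predicted as class j\n",
+    "    tp = np.diag(conf).astype(float)\n",
+    "    class_acc = tp / conf.sum(axis=1)           # Class Accuracy (CA)\n",
+    "    pixel_acc = tp.sum() / conf.sum()           # Pixel Accuracy (PA)\n",
+    "    mean_class_acc = np.nanmean(class_acc)      # Mean Class Accuracy (MCA)\n",
+    "    iou = tp / (conf.sum(axis=1) + conf.sum(axis=0) - tp)\n",
+    "    mean_iou = np.nanmean(iou)                  # Mean IoU (MIoU)\n",
+    "    freq = conf.sum(axis=1) / conf.sum()        # class frequencies\n",
+    "    fw_iou = np.nansum(freq * iou)              # Frequency Weighted IoU (FW IoU)\n",
+    "    return pixel_acc, class_acc, mean_class_acc, fw_iou, mean_iou\n",
+    "\n",
+    "# toy 2-class confusion matrix\n",
+    "print(segmentation_scores(np.array([[50, 5], [10, 35]])))"
+   ]
+  },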
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's first load the model saved previously."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "with open(CONFIG_FILE, \"rt\") as f_read:\n",
+    "    config = yacs.config.load_cfg(f_read)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model = getattr(models, config.MODEL.NAME).get_seg_model(config)\n",
+    "if max_epochs > 1:\n",
+    "    model.load_state_dict(torch.load(config.TEST.MODEL_PATH), strict=False)\n",
+    "else:\n",
+    "    model.load_state_dict(torch.load(path.join(output_dir, \"model_patch_deconvnet_skip_1.pth\")), strict=False)\n",
+    "model = model.to(device)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we load the test data and define the augmentations to apply to it."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 18,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Augmentation\n",
+    "section_aug = Compose(\n",
+    "    [Normalize(mean=(config.TRAIN.MEAN,), std=(config.TRAIN.STD,), max_pixel_value=1)]\n",
+    ")\n",
+    "\n",
+    "patch_aug = Compose(\n",
+    "    [\n",
+    "        Resize(\n",
+    "            config.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT,\n",
+    "            config.TRAIN.AUGMENTATIONS.RESIZE.WIDTH,\n",
+    "            always_apply=True,\n",
+    "        ),\n",
+    "        PadIfNeeded(\n",
+    "            min_height=config.TRAIN.AUGMENTATIONS.PAD.HEIGHT,\n",
+    "            min_width=config.TRAIN.AUGMENTATIONS.PAD.WIDTH,\n",
+    "            border_mode=cv2.BORDER_CONSTANT,\n",
+    "            always_apply=True,\n",
+    "            mask_value=255,\n",
+    "        ),\n",
+    "    ]\n",
+    ")\n",
+    "\n",
+    "# Process test data\n",
+    "pre_processing = compose_processing_pipeline(config.TRAIN.DEPTH, aug=patch_aug)\n",
+    "output_processing = output_processing_pipeline(config)\n",
+    "\n",
+    "# Select the test split\n",
+    "split = \"test1\" if \"both\" in config.TEST.SPLIT else config.TEST.SPLIT\n",
+    "\n",
+    "labels = np.load(path.join(dataset_root, \"test_once\", split + \"_labels.npy\"))\n",
+    "section_file = path.join(dataset_root, \"splits\", \"section_\" + split + \".txt\")\n",
+    "write_section_file(labels, section_file, config)\n",
+    "\n",
+    "# Load test data\n",
+    "TestSectionLoader = get_test_loader(config)\n",
+    "test_set = TestSectionLoader(\n",
+    "    dataset_root, split=split, is_transform=True, augmentations=section_aug\n",
+    ")\n",
+    "# work around this PyTorch bug, where one of the workers may quit prematurely:\n",
+    "# https://github.com/pytorch/pytorch/issues/973\n",
+    "torch.multiprocessing.set_sharing_strategy('file_system')\n",
+    "test_loader = data.DataLoader(\n",
+    "    test_set, batch_size=1, num_workers=config.WORKERS, shuffle=False\n",
+    ")\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Predict segmentation masks on the test data\n",
+    "\n",
+    "For demonstration purposes and efficiency, we will predict segmentation masks on only a subset of the test data. More precisely, we will score `N_EVALUATE` images. If you would like to evaluate more images, set this variable to the desired number of images."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "2019-12-09 04:08:36,547 - __main__ - INFO - split: test1, section: 0\n",
+      "2019-12-09 04:08:41,543 - __main__ - INFO - split: test1, section: 1\n",
+      "2019-12-09 04:08:42,067 - __main__ - INFO - split: test1, section: 2\n"
+     ]
+    }
+   ],
+   "source": [
+    "CLASS_NAMES = [\n",
+    "    \"upper_ns\",\n",
+    "    \"middle_ns\",\n",
+    "    \"lower_ns\",\n",
+    "    \"rijnland_chalk\",\n",
+    "    \"scruff\",\n",
+    "    \"zechstein\",\n",
+    "]\n",
+    "\n",
+    "n_classes = len(CLASS_NAMES)\n",
+    "\n",
+    "# keep only N_EVALUATE sections to score\n",
+    "test_subset = random.sample(list(test_loader), N_EVALUATE)\n",
+    "\n",
+    "results = list()\n",
+    "running_metrics_split = runningScore(n_classes)\n",
+    "\n",
+    "# testing mode\n",
+    "with torch.no_grad():\n",
+    "    model.eval()\n",
+    "    # loop over testing data\n",
+    "    for i, (images, labels) in enumerate(test_subset):\n",
+    "        logger.info(f\"split: {split}, section: {i}\")\n",
+    "        outputs = patch_label_2d(\n",
+    "            model,\n",
+    "            images,\n",
+    "            pre_processing,\n",
+    "            output_processing,\n",
+    "            config.TRAIN.PATCH_SIZE,\n",
+    "            config.TEST.TEST_STRIDE,\n",
+    "            config.VALIDATION.BATCH_SIZE_PER_GPU,\n",
+    "            device,\n",
+    "            n_classes,\n",
+    "        )\n",
+    "\n",
+    "        pred = outputs.detach().max(1)[1].numpy()\n",
+    "        gt = labels.numpy()\n",
+    "\n",
+    "        # update evaluation metrics\n",
+    "        running_metrics_split.update(gt, pred)\n",
+    "\n",
+    "        # keep ground truth and result for plotting\n",
+    "        results.append((np.squeeze(gt), np.squeeze(pred)))\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's view the metrics obtained on this subset of test images. Note that, for demonstration purposes, we trained our model for only a small number of epochs, so the performance results here are not meant to be representative. The `nan` class accuracy and the `true_divide` warnings below simply indicate that a class did not occur in the ground truth of the few sections we scored.\n",
+    "\n",
+    "Performance exceeds the numbers shown here when the models are trained properly. For the full report on benchmark performance results, please refer to the [README.md](../../../README.md) file."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Pixel Acc: 0.522\n",
+      "    upper_ns_accuracy 0.000\n",
+      "    middle_ns_accuracy 0.000\n",
+      "    lower_ns_accuracy 0.999\n",
+      "    rijnland_chalk_accuracy 0.000\n",
+      "    scruff_accuracy 0.001\n",
+      "    zechstein_accuracy nan\n",
+      "Mean Class Acc: 0.200\n",
+      "Freq Weighted IoU: 0.273\n",
+      "Mean IoU: 0.105\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "invalid value encountered in true_divide\n",
+      "invalid value encountered in true_divide\n"
+     ]
+    }
+   ],
+   "source": [
+    "# get scores\n",
+    "score, _ = running_metrics_split.get_scores()\n",
+    "\n",
+    "# Log split results\n",
+    "print(f'Pixel Acc: {score[\"Pixel Acc: \"]:.3f}')\n",
+    "for cdx, class_name in enumerate(CLASS_NAMES):\n",
+    "    print(f'    {class_name}_accuracy {score[\"Class Accuracy: \"][cdx]:.3f}')\n",
+    "\n",
+    "print(f'Mean Class Acc: {score[\"Mean Class Acc: \"]:.3f}')\n",
+    "print(f'Freq Weighted IoU: {score[\"Freq Weighted IoU: \"]:.3f}')\n",
+    "print(f'Mean IoU: {score[\"Mean IoU: \"]:0.3f}')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Visualize predictions"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's visualize the predictions on entire test sections. Note that the crosslines and inlines have different dimensions; we were nevertheless able to use them jointly for network training and evaluation, since we were working with smaller patches from the sections, whose size we can control via a hyperparameter in the experiment configuration file."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 21,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "image/png": "[... base64-encoded PNG output elided: visualization of predictions on the test sections ...]"
NP2Jdm54L707sYkr0hyRZKvJrl/khck2V1V92+t/U2G78fn13nuvvF+Z5Kvb35XuzfrcT7Y78faftiYW5O8Nsl7klyXYW3TC5L8UVU9uLX26bGdY88qUCMXQ31cLWrk8qiRK26rBLxkmMM7rRbei8611j6R5BMTm66oqg8k+UiGheUvynDcfT8236zH2fdjzlpr1yb5mYlNH6yq38vwqeMLk/zUuN2xZ1X4Odxk6uPKUSOXRI1cfVtliuYNWT/l78z6nwwwR621jyf5bJIHjZv25cDfj8T3ZF5mPc6HardvncfYoNba1Un+MN/5PUgce1aDGrkk6uNSqZErRI1cLVsl4O3JMI932mlJ/nTBfTlaTX4Kc7Dvxxdaa6afzMesx3lPklPHU6VPt/tmks+FeZn+NNKxZxWokculPi6HGrl61MgVsVUC3q4kD6mqe65tGC+6eNb4GJuoqs5M8g+SfHjctCvJSVX18Ik2d0zy+Ph+zNOsx3lXhmv/PHmi3bYkT0nyntbarYvpbt/GEymcle/8HiSOPatBjVwS9XGp1MgVokaulmptvamxq2W8UOsnk9ySYY57S/ILSe6Q5Id9IjY/VfXmJHuTfDzJVzIsIr8oyc1JHtBau76qbpdhGP4eSZ6XYRrERUl+OMkZ4zA9h1BVTxr/+agMc9l/NsNi5etaa1ds5DhX1VuSnDO225vkwgwXFP2H4xQiJsxw7F+R4QOw3eP2e2c49ndK8iOttc9M7MuxZ6nUyMVQHxdLjVweNbIDy74Q36y3JCcneVuGM1d9LcnvZurCi25zOc4XJfnjDGcL+1aSq5NcmuRuU+1OTPKGDPOnb07yBxn+oC79PWyVW4b/hK13e/9Gj3OS2yf55SR/neQbGT5BO3vZ73FVb4c69knOT/LRDP9h2D8e199Ocm/H3m0Vb2rkQo6x+rjY461GruixVyNX/7YlRvAAAAA4tK2yBg8AAIBDEPAAAAA6IeABAAB0QsADAADohIAHAADQCQEPAACgEwIeAABAJwQ8AACATgh4AAAAnRDwAAAAOiHgAQAAdELAAwAA6ISABwAA0AkBDwAAoBMCHgAAQCcEPAAAgE4IeAAAAJ0Q8AAAADoh4AEAAHRCwAMAAOiEgAcAANAJAQ8AAKATAh4AAEAnBDwAAIBOCHgAAACdEPAAAAA6IeABAAB0QsADAADohIAHAADQCQEPAACgEwIeAABAJwQ8AACATgh4AAAAnRDwAAAAOiHgAQAAdELAAwAA6ISABwAA0AkBDwAAoBMCHgAAQCcEPAAAgE4IeAAAAJ0Q8AAAADoh4AEAAHRCwAMAAOiEgAcAANAJAQ8AAKATAh4AAEAnBDwAAIBOCHgAAACdEPAAAAA6IeABAAB0QsADAADohIAHAADQCQEPAACgEwIeAABAJwQ8AACATgh4AAAAnRDwAAAAOiHgAQAAdELAAwAA6ISABwAA0AkBDwAAoBMCHgAAQCcEPAAAgE4IeAAAAJ0Q8AAAADoh4AEAAHRCwAMAAOiEgAcAANAJAQ8AAKATAh4AAEAnBDwAAIBOCHgAAACdEPAAAAA6IeABAAB0QsADAADohIAHAADQCQEPAACgEwIeAABAJwQ8AACATgh4AAAAnRDwAAAAOiHgAQAAdELAAwAA6ISABwAA0AkBDwAAoBMCHgAAQCcEPAAAgE4IeAAAAJ0Q8AAAADoh4AEAAHRCwAMAAOiEgAcAANAJAQ8AAKATAh4AAEAnBDwAAIBOCHgAAACdEPAAAAA6IeABAAB0QsADAADohIAHAADQCQEPAACgEwIeAABAJwQ8AACATgh4AAAAnRDwAAAAOiHgAQAAdELAAwAA6ISABwAA0AkBDwAAoBMCHgAAQCcEPAAAgE4IeAAAAJ0Q8AAAADoh4AEAAHRCwAMAAOiEgAcAANAJAQ8AAKATAh4AAEAnBDwAAIBOCHgAAACdEPAAAAA6IeABAAB0QsADAADohIAHAADQCQEPAACgEwIeAABAJwQ8AACATgh4AAAAnRDwAAAAOiHgAQAAdELAAwAA6ISABwAA0AkBDwAAoBMCHgAAQCcEPAAAgE4IeAAAAJ0Q8AAAADoh4AEAAHRCwAMAAOiEgAcAANAJAQ8AAKATAh4AAEAnBDwAAIBOCHgAAACdEPAAAAA6IeABAAB0QsADAADohIAHAADQCQEPAACgEwIeAABAJwQ8AACATgh4AAAAnRDwAAAAOiHgAQAAdELAAwAA6ISABwAA0AkBDwAAoBMCHgAAQCcEPAAAgE4IeAAAAJ0Q8AAAADoh4AEAAHRCwAMAAOiEgAcAANAJAQ8AAKATAh4AAEAnBDwAAIBOCHgAAACdEPAAAAA6IeABAAB0QsADAADohIAHAADQCQEPAACgEwIeAABAJwQ8AACATgh4AAAAnRDwAAAAOiHgAQD/P3v3Hyv7Xd93/vUm1+bWNinXBVUbg9dGbIlsEaBxSCip+ZGqJi0YrYCgJlltjYgbtyKr0hKtIcqSLKmlUCrUqInwBlC0IcJqSZobVQqUwBpncyEQYlpuESmKE5vFVe1cxwm2YzD+7B8zB4bJOffMuWfO/Hjfx0MaHd/vfM+c73yvzYfn+Xw/8wWgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEH
AADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAm1hJ4VfX0qvp3VfVgVf1ZVf1qVV2+jmMBgE1ijATgMGqMsdofWHVRks8keTTJTyQZSd6W5KIk3zHGeGilBwQAG8IYCcBhHVvDz/yRJM9I8qwxxheSpKr+U5L/muQfJflXazgmANgExkgADmUdM3i/leT4GOOFc9tvT5IxxotWekAAsCGMkQAc1jrW4F2d5LO7bD+d5KoVHwsAbBJjJACHso5LNC9N8sAu288kObHIC1xYTxzHc/FSDwqAzfMXeShfGY/Wuo9jhQ41Rh47fvG48EmXLv2gANg8j9z/xfvHGE+d376OwEsmi8bnnXUAr6obk9yYJMdzUb67vu8ojguADfKJ8VvrPoR1ONAYOTs+XnDJiTzrVf/0qI4LgA1y57v+2R/vtn0dl2g+kMlvKOedyO6/tUySjDFuHWNcM8a45oI88cgODgDW6MBj5Oz4eOy4q1sAznfrCLzTmawxmHdVkv+y4mMBgE1ijATgUNYReCeTfE9VPWNnQ1VdkeSF0+cA4HxljATgUNYReP9Xkj9K8utV9cqquj7Jrye5J8m71nA8ALApjJEAHMrKA2+M8VCSlyb5gyT/d5L3JbkryUvHGF9e9fEAwKYwRgJwWGv5FM0xxt1JXrWOnw0Am8wYCcBhrOMSTQAAAI6AwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNHFv3AbCdHv6fv/tA+1/0a584oiMBAAB2mMEDAABoQuABAAA0IfAAAACa2Mo1eI8/+eI8/NKDrQFblLVi3+yga+0WeR3nGAAAjoYZPAAAgCYEHgAAQBMCDwAAoImtXIN3lPZac7bqdWPLWvu2iXbem7V4AACwXGbwAAAAmhB4AAAATQg8AACAJqzBW9BR3cet81q7/ViLBwAAy2UGDwAAoAmBBwAA0ITAAwAAaMIavHMwv25uvzVk5/M6u0Xsd37Odn7P9dwusu7vKP7erDcEAOAomcEDAABoQuABAAA04RLNJXAJ5tE6ivO7rr+zVf5cl4MCAJx/zOABAAA0IfAAAACaEHgAAABNWIMHTe213s/aPACAvszgAQAANCHwAAAAmhB4AAAATViDB+eZdd+30RpAAICjYwYPAACgCYEHAADQhMADAABowho8YKXWvQawi1WuZVzn39njH/n42n42AGwjM3gAAABNCDwAAIAmBB4AAEAT1uABbKH5dXHLWJNnfSQAbD8zeAAAAE2YwQNoYLfZt/1m9czYAUA/S53Bq6oXV9XY5fGnc/udqKpfrKr7q+qhqvpwVT17mccCAJvC+AjAqhzVDN6PJfnkzJ8f2/mHqqokJ5NcmeQNSR5IcnOSj1bVc8cYXzyiYwKAdTM+AnCkjirwPjfG2OvutNcn+d4kLx1jfDRJqupUkruS/Hgmgx8Ah+QSzI1kfATgSK3jQ1auT/KlncErScYYDyb5jSSvXMPxAMAmMD4CcGhHFXjvq6qvVdWfVNWvVNXlM89dneSzu3zP6SSXV9UlR3RMALBuxkcAjtSyL9F8MMk7ktye5M+SPC/Jm5OcqqrnjTH+e5JLk/zRLt97Zvr1RJIvzz9ZVTcmuTFJLvwrT17yYQPAkVrJ+HjBJSeWfuAAbJelBt4Y4/eT/P7Mptur6mNJfjeTtQM/kaSSjF2+vfZ57VuT3Jokl5x4+m7fDwAbaVXj40VPNT4CnO+OfA3eGOPTSf4gyXdNN53J5LeU83Z+7fjAUR8TAKyb8RGAo7CqD1mZ/a3k6UzWGcy7KsndY4y/dPkJADRlfARgqY488KrqmiR/I8knpptOJrmsql40s8+3JnnF9DkAaM/4CMBRWOoavKp6Xyb36/l0kj/NZBH5zUn+vyQ/N93tZJJTSX65qt6Ub9zItZL87DKPBwA2gfERgFVZ9qdofjbJP0jyhiQXJflvSX41yf8xxrg/ScYYj1fVy5P8yyQ/n+R4JgPaS8YY9yz5eABgExgfAViJZX+K5i1JbllgvzNJXjd9AEBrxkcAVmVVH7ICAADAERN4AAAATQg8AACAJgQeAABAEwIPAACgia0MvK8+KfnStbXuwwAAANgoWxl4AAAA/GUCDwAAoIm
l3uh81TblMs1v+9j4pj8fxXHN/4xlOMxx7nc8i7z2UbynbbOOf4eddwCAvszgAQAANCHwAAAAmhB4AAAATWz1GrxNsYp1VHv9jHWtp9qU9Y/bYpPO186xWIsHANCPGTwAAIAmBB4AAEATAg8AAKAJa/C23Cat7TqoRY99Z63YNr/XTbSp92ucXSN40H9HAADOd2bwAAAAmhB4AAAATQg8AACAJqzBY+NZe7c9lvl3dZDXOh//HbHuEADYjRk8AACAJszgAWyh/WYtzfABwPnJDB4AAEATAg8AAKAJgQcAANCENXgADZ1tjZ71eQDQlxk8AACAJgQeAABAEy7RBDjPzF++6ZJNAOjDDB4AAEATAg8AAKAJgQcAANCENXgA57mz3VJhL9btAcBmMoMHAADQhMADAABoQuABAAA0YQ0eAAe237q9g6zR23kt6/oA4PDM4AEAADQh8AAAAJoQeAAAAE1YgwfA0p3LvfV2+56vfnIZRwMA5w8zeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaWCjwquppVfVzVXWqqh6uqlFVV+yy34mq+sWqur+qHqqqD1fVs3fZ73hVvb2q7q2qR6ave+3h3w4ArI7xEYBNs+gM3jOT/ECSB5LcsdsOVVVJTiZ5WZI3JHlVkguSfLSqnja3+7uT/EiSn0zy8iT3JvlgVT33oG8AANbI+AjARjm24H4fG2P89SSpqtcn+bu77HN9ku9N8tIxxken+55KcleSH0/yY9Ntz0nyg0leN8Z473Tb7UlOJ/np6esAwDYwPgKwURaawRtjPL7Abtcn+dLO4DX9vgeT/EaSV87t99Ukt83s91iS9ye5rqqeuMgxAcC6GR8B2DTL/JCVq5N8dpftp5Nc97J7GAAAIABJREFUXlWXzOx31xjj4V32uzCTy10AoAvjIwArs8zAuzSTNQjzzky/nlhwv0uXeEwAsG7GRwBWZpmBV0nGHtvPZb9vfrLqxqr6VFV96mtffugcDxEAVm5l4+Njf2F8BDjfLTPwzmT33y7u/GbygQX3O7PLcxlj3DrGuGaMcc23XHLxoQ4UAFZoZePjsePGR4Dz3TID73Qm6wfmXZXk7jHGl2f2u7KqLtplv68k+cISjwkA1s34CMDKLDPwTia5rKpetLOhqr41ySumz83ud0GS18zsdyzJa5N8aIzx6BKPCQDWzfgIwMoseh+8VNWrp//4ndOv319V9yW5b4xxeyYD06kkv1xVb8rkkpObM1k78LM7rzPGuLOqbkvyzqq6IJP7AN2U5MokP3TI9wMAK2V8BGCTLBx4Sf7t3J9/fvr19iQvHmM8XlUvT/Ivp88dz2RAe8kY4565770hyc8keVuSJyf5TJKXjTE+fcDjB4B1Mz4CsDEWDrwxxlk/xWu6z5kkr5s+zrbfI0neOH0AwNYyPgKwSZa5Bg8AAIA1EngAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQxLF1H0An177g9Fmf/9ipq1d0JACwfZ5y66kkyf03vuDr237vrb+QJPnOt960lmMC2DZm8AAAAJowg7eH/Wbj1vWaZgEB6Gp25m7HOmbudptJPJd9FjU/S7nM1wbOP2bwAAAAmjCDBwBsvKOY1dprfd/Ozzjb+r9lHsdeP3/H/Hu3LhE4m60MvCdd/MiRXEJ5WO+9/I6zPn/D3X/70D9jE993Ny6DBdg8ywyqnWD6zpz9kshNCahNPS5gM7lEEwAAoImtnMEDADionZm6eYvODs5+/36XUQKsixk8AACAJszg7WG/9XSres1lrNvbNHudh015r25YD9DTYWfXzvb9Zu6ATWEGDwAAoAmBBwAA0MRCgVdVT6uqn6uqU1X1cFWNqrpibp8rptt3ezx5bt/jVfX2qrq3qh6Zvu61y3tbAHD0jI8AbJpF1+A9M8kPJPm9JHck+btn2feWJCfntv353J/fneTvJ3lTkj9M8k+SfLCqXjDGuHPBY/q6o1gvtyk6v7d5m/Ze91oTuLNGz1o8IBs+PrKd9rqRuU/qBBaxaOB9bIzx15Okql6fsw9gfzjG+PheT1bVc5L8YJLXjTHeO912e5LTSX46yfULHhMArJvxEYCNslDgjTEeX+LPvD7JV5PcNvP6j1XV+5P871X1xDHGo0v8eQBwJIyPHIX5mbuvz+jlpt12b2GvWctNsy3HyfntKD5k5ZaqeqyqHqyqk1X17Lnnr05y1xjj4bntp5NcmMnlLgDQjfERgCO3zPvgPZrkXUk+lOS+JN+e5M1Jfqeqnj/G+Nx0v0uTPLDL95+Zef6srrjwyxu3Xot+5v8dm1+Tt9/98nazaev25t/Dph0fNLGy8ZGeus0WPeXWU39pHeG2vMdtOU7Ob0sLvDHGvUl+dGbTHVX1m5n85vEtSX54ur2SjF1eos72+lV1Y5Ibk+Tyy9yfHYDtsMrx8YJLThz6eAHYbkdaSmOMe6rqt5N818zmM0ku32X3EzPP7/Zatya5NUmuec7x3QZAANgKRzU+XvTUpxsf2TgH+fTPnX13rPsTQ/c69vntPuGUTbKKG53P/0bydJIrq+qiuf2uSvKVJF9YwTEBwLoZHwFYuiOdwauqy5O8MMmvzWw+meSnkrwmyS9N9zuW5LVJPuQTwujsXNbtzTvXdXKL/OxzPT5r9+BgjI+cT+Zntc42y7XXc4eZITvM9+71PYu+JzN7rMPCgVdVr57+43dOv35/Vd2X5L4xxu1V9Y5MZgRPZbKI/FlJbk7yeJJ/sfM6Y4w7q+q2JO+sqguS3J
XkpiRXJvmhQ74fAFgp4yMAm+QgM3j/du7PPz/9enuSF2dyaclNSf5hkicluT/JR5L81Bjj83Pfe0OSn0nytiRPTvKZJC8bY3z6AMcDAJvA+AhHbK+1bvNr9maf2+vPizjs/e7M3LFOCwfeGOOsn+I1xnhPkvcs+FqPJHnj9AEAW8v4CMAmqTG27wO3rnnO8fG7H3z6ug8DDmz+XnodWY/HMn3pHe/Mo3ffc9aA4hsueurTx7Ne9U/XfRic5+Znv9Y1m+WTLunuznf9s98bY1wzv90N5QAAWJpzuazxKOJrmR+EsujtEjbVYS85Zbus4jYJAAAArIAZPFih915+xzl93zIv7TzXY1jEDXf/7b90qwWXbAKcX/aa1dqZRbru25779ee+PrOU9c0s7Tb7tt/M3H7Pb9qM2aYcB6thBg8AAKAJM3gAACzN3mvf7vz6nzf9NgR7vYf9btOws33dM2Z7Hdci523TZh85ODN4AAAATbhNArBSO+sJD7I2b35d32FYE7hd3CbhYNwmgW72monaFIt+uubvvfUX1n7biMPalJm9bT1/R2Gv2ySYwQMAAGjCGjwAADbSXmvdDuIoZ3z2Wts2/7Ou+7bnJjcu/cd/3VG8x/nXvO7bnjt5Yp/38ZRbTy3tOHZ7X2bu9mcGDwAAoAlr8ICtscz7AZ6rnTV8177g9Df981H9nPOdNXgHYw0esCrWwq2fNXgAAADNWYMHbI33Xn7H0l7rXGcDZ2frjmLm7lxf24wfwPlrHbNp5/KzVrFWEDN4AAAAbZjBAwCALbbf7NUis1yruM/dUX6S6Y793uvsPQlntyXrv8ffspjBAwAAaMKnaAIs4Gxr9ubXBu7su7N9/s+H/XnLsg3r9nyK5sH4FE1gWebvPbita9x2m9E713V78zN965758ymaAAAAzVmDBwAAfJNFZ7fmZ8OecuupjZrt2+1Y9jq+/Wb25mfqNnXNnks0AbZY90s5XaJ5MC7RBA7LbQe+4SjPxV6Xd+72ITB7cYkmAABAcy7RBAAAkhz80szdvq/LLOAqZ+52LOOyTzN4AAAATZjBA9hie92iYZmufcHpc/7ebbgVAwAHd7YPKtn2mbtFncstGM7lJusHvR2DGTwAAIAmzOABAABLcb7M3iUHuwXDXhaZlTvoujwzeAAAAE2YwQNoZGdN3iruj7eIRdbvWacHwPlmr7V6s2sYz/XTSM3gAQAANGEGDwAAYIX2mpWb3X6u6xnN4AEAADRhBg+godn7423Kery9nG2d3n+4+JEVHgkAbD8zeAAAAE0IPAAAgCYEHgAAQBPW4AE0N7se72w2fa0eALA/M3gAAABNCDwAAIAmBB4AAEAT1uABkGSxtXrW6QHAZjODBwAA0ITAAwAAaMIlmgAsbNFbLuxwSScArJYZPAAAgCYEHgAAQBMCDwAAoAlr8AA4Mm69AACrZQYPAACgCTN4AKzV2Wb5nn/hl1d4JACw/czgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoYt/Aq6pXV9UHquqPq+qRqvp8Vd1SVU+a2+9EVf1iVd1fVQ9V1Yer6tm7vN7xqnp7Vd07fb1TVXXtMt8UAKyCMRKATbPIDN4/T/K1JG9O8rIkv5DkpiT/saqekCRVVUlOTp9/Q5JXJbkgyUer6mlzr/fuJD+S5CeTvDzJvUk+WFXPPfS7AYDVMkYCsFGOLbDPK8YY9838+faqOpPkl5K8OMlHklyf5HuTvHSM8dEkqapTSe5K8uNJfmy67TlJfjDJ68YY751uuz3J6SQ/PX0dANgWxkgANsq+M3hzA9eOT06/Xjb9en2SL+0MXNPvezDJbyR55cz3XZ/kq0lum9nvsSTvT3JdVT3xQEcPAGtkjARg05zrh6y8aPr1c9OvVyf57C77nU5yeVVdMrPfXWOMh3fZ78IkzzzH4wGATWGMBGBtDhx4VXVZJpeKfHiM8anp5kuTPLDL7memX08suN+lBz0eANgUxkgA1u1AgTf9LeOvJ3ksyQ2zTyUZu33LLn9eZL/dfvaNVfWpqvrUfX/ytQWPGABWY11j5Oz4+NhfPHSAIwago4UDr6qOZ/IpYM9Ict0Y44szT5/J7r9Z3Pmt5AML7ndml+eSJGOMW8cY14wxrnnqX/uWRQ8bAI7cOsfI2fHx2PGLD3zsAPSyUOBV1QVJPpDk+Un+3hjjP8/tcjqTtQPzrkpy9xjjyzP7XVlVF+2y31eSfGHRAweATWCMBGCTLHKj8yckeV+S70vyyjHGx3fZ7WSSy6rqRTPf961JXjF9bna/C5K8Zma/Y0lem+RDY4xHz+VNAMA6GCMB2DSL3Afv32Qy2PxMkoeq6ntmnvvi9DKUk0lOJfnlqnpTJpeb3JzJuoGf3dl5jHFnVd2W5J3T33jelckNYa9M8kNLeD8AsErGSAA2yiKXaH7/9OtbMhmgZh+vT5IxxuNJXp7kPyb5+SS/luRrSV4yxrhn7vVuSPLeJG9L8h+SPD3Jy8YYnz7UOwGA1TNGArBR9p3BG2NcscgLjTHOJHnd9HG2/R5J8sbpAwC2ljESgE1zrjc6BwAAYMMIPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJvYNvKp6dVV9oKr+uKoeqarPV9UtVfWkmX2uqKqxx+PJc693vKreXlX3Tl/vVFVdexRvDgCOkjESgE1zbIF9/nmSu5O8OckXkzwvyVuTvKSq/tYY4/GZfW9JcnLu+/987s/vTvL3k
7wpyR8m+SdJPlhVLxhj3HngdwAA62OMBGCjLBJ4rxhj3Dfz59ur6kySX0ry4iQfmXnuD8cYH9/rharqOUl+MMnrxhjvnW67PcnpJD+d5PqDHT4ArJUxEoCNsu8lmnMD145PTr9edsCfd32Srya5beb1H0vy/iTXVdUTD/h6ALA2xkgANs25fsjKi6ZfPze3/ZaqeqyqHqyqk1X17Lnnr05y1xjj4bntp5NcmOSZ53g8ALApjJEArM0il2h+k6q6LJNLRT48xvjUdPOjSd6V5ENJ7kvy7ZmsR/idqnr+GGNnkLs0yQO7vOyZmecBYCsZIwFYtwMFXlVdkuTXkzyW5Iad7WOMe5P86Myud1TVb2byW8e3JPnhnZdIMnZ76QV+9o1JbkySyy87cJcCwJFa1xg5Oz5ecMmJcz18AJpY+BLNqjqeyad/PSPJdWOML55t/zHGPUl+O8l3zWw+k91/A3li5vm9Xu/WMcY1Y4xrnvrXvmXRwwaAI7fOMXJ2fDx2/OIDHzsAvSwUeFV1QZIPJHl+kr83xvjPC77+/G8jTye5sqoumtvvqiRfSfKFBV8XADaCMRKATbLIjc6fkOR9Sb4vySvP9hHPc993eZIXJvnEzOaTSS5I8pqZ/Y4leW2SD40xHl380AFgvYyRAGyaRRaz/ZtMBpufSfJQVX3PzHNfHGN8sarekUksnspkAfmzktyc5PEk/2Jn5zHGnVV1W5J3Tn/jeVeSm5JcmeSHlvB+AGCVjJEAbJRFAu/7p1/fMn3M+qkkb83kspKbkvzDJE9Kcn8mN3f9qTHG5+e+54ZMBsK3JXlyks8kedkY49MHP3wAWCtjJAAbZd/AG2NcscA+70nynkV+4BjjkSRvnD4AYGsZIwHYNOd6o3MAAAA2jMADAABoosbY7Z6qm62q7kvyUCbrGFi9p8S5Xxfnfn2c+/X4H8cYT133QWwL4+Pa+d+J9XHu18e5X59dx8itDLwkqapPjTGuWfdxnI+c+/Vx7tfHuWdb+Hd1fZz79XHu18e53zwu0QQAAGhC4AEAADSxzYF367oP4Dzm3K+Pc78+zj3bwr+r6+Pcr49zvz7O/YbZ2jV4AAAAfLNtnsEDAABgxtYEXlU9var+XVU9WFV/VlW/WlWXr/u4uqmqF1fV2OXxp3P7naiqX6yq+6vqoar6cFU9e13HvW2q6mlV9XNVdaqqHp6e4yt22W+h81xVx6vq7VV1b1U9Mn3da1fxXrbNIue+qq7Y47+DUVVPntvXuWftjJFHz/i4OsbI9TFG9rAVgVdVFyX5SJJvT/K/JvlfkvxPST5aVRev89ga+7EkL5h5/J2dJ6qqkpxM8rIkb0jyqiQXZPL38bTVH+pWemaSH0jyQJI7dtvhgOf53Ul+JMlPJnl5knuTfLCqnnskR7/d9j33M27JN/938IIkfz63j3PPWhkjV874ePSMketjjOxgjLHxjyT/W5KvJXnmzLYrkzyW5I3rPr5OjyQvTjKS/J2z7PPK6T4vmdn2V5OcSfKv1/0etuGR5Akz//z66fm84lzOc5LnTPe7YWbbsSSfT3Jy3e910x4Lnvsrpttfv89rOfcea38YI1d2no2PqzvXxsjNPvfGyA1/bMUMXpLrk3x8jPGFnQ1jjLuS/L+Z/AfOal2f5EtjjI/ubBhjPJjkN+LvYyFjjMcX2G3R83x9kq8muW1mv8eSvD/JdVX1xKUcdBMLnvtFOfdsAmPk5jA+LoExcn2MkT1sS+BdneSzu2w/neSqFR/L+eJ9VfW1qvqTqvqVubUcZ/v7uLyqLlnNIba36Hm+OsldY4yHd9nvwkwut+Dc3FJVj03XNZ3cZW2Hc88mMEaulvFxMxgj188YuaGOrfsAFnRpJtcCzzuT5MSKj6W7B5O8I8ntSf4syfOSvDnJqap63hjjv2fy9/FHu3zvmenXE0m+fPSH2t6i5/ls/33svA4H82iSdyX5UJL7Mlnb9OYkv1NVzx9jfG66n3PPJjBGrobxcbMYI9fHGLnhtiXwksk1vPNq5UfR3Bjj95P8/sym26vqY0l+N5OF5T+RyXn393H0Fj3P/j6WbIxxb5Ifndl0R1X9Zia/dXxLkh+ebnfu2RT+PTxixseNY4xcE2Pk5tuWSzQfyO6VfyK7/2aAJRpjfDrJHyT5rummM9n77yPxd7Isi57n/fY7s8tzHNAY454kv51v/HeQOPdsBmPkmhgf18oYuUGMkZtlWwLvdCbX8c67Ksl/WfGxnK9mfwtztr+Pu8cYLj9ZjkXP8+kkV04/Kn1+v68k+UJYlvnfRjr3bAJj5HoZH9fDGLl5jJEbYlsC72SS76mqZ+xsmN508YXT5zhCVXVNkr+R5BPTTSeTXFZVL5rZ51uTvCL+PpZp0fN8MpN7/7xmZr9jSV6b5ENjjEdXc7i9TT9I4YX5xn8HiXPPZjBGronxca2MkRvEGLlZaozdLo3dLNMbtX4mySOZXOM+kvyfSZ6U5Dv8Rmx5qup9Se5K8ukkf5rJIvKbkzyc5G+OMe6vqidkMg3/9CRvyuQyiJuTfEeS50yn6dlHVb16+o/fl8m17P84k8XK940xbj/Iea6q9ye5brrfXUluyuSGon9regkRMxY49+/I5Bdgp6bbn5XJuf+rSb57jPH5mddy7lkrY+RqGB9Xyxi5PsbIBtZ9I75FH0kuT/KBTD656s+T/PvM3XjRYynn+eYk/ymTTwv7apJ7ktya5H+Y2+/SJO/J5Prph5P8Vib/g7r297Atj0z+T9huj//noOc5yV9J8q+S/Lckf5HJb9BevO73uKmP/c59ktcl+WQm/4fhsel5/ZUkz3LuPTbxYYxcyTk2Pq72fBsjN/TcGyM3/7EVM3gAAADsb1vW4AEAALAPgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMAD/n/27j9G8ru+7/jrHd3ZV9sQzoJEqsG1ESqpETWIg0JBMRApQAImEhAUyB/BgmusCqSQkNYQpYZCLOVHhRQFisuPRoUKK4GUiyoFQkAHUY4E45iGKyJFcYJdXNXuOU44jOHwp3/sbBlP9+5mb2dnZt/3eEir9c5857ufmUN89NzP9zMDAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwA
AIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAnihKmAAAgAElEQVQATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNrCTwqupxVfW7VXV/Vf1dVX20qi5fxVgAYJ2YIwHYiRpjLPcXVl2U5ItJHkzyS0lGkrcnuSjJPx1jnFzqgABgTZgjAdipfSv4na9L8vgkTxxjfDVJquq/JfkfSf5Fkn+3gjEBwDowRwKwI6tYwfujJAfGGM+euf1okowxrlnqgABgTZgjAdipVezBe1KSL21x+/EkVy15LACwTsyRAOzIKi7RvDTJfVvcfiLJwXlOcEFdOA7k4oUOCoD1862czLfHg7XqcSzRjubIfQcuHhc84tKFDwqA9fPAvXfdO8Z4zOztqwi8ZGPT+KwzTuBVdTjJ4SQ5kIvyz+pHdmNcAKyRPx1/tOohrMK25sjp+XH/JQfzxJf93G6NC4A1cvt7fv5vtrp9FZdo3peNv1DOOpit/2qZJBlj3DzGODTGOLQ/F+7a4ABghbY9R07Pj/sOuLoF4Hy3isA7no09BrOuSvLflzwWAFgn5kgAdmQVgXckyTOr6vGbN1TVFUmePbkPAM5X5kgAdmQVgfcfkvx1ko9V1Uu
r6tokH0tyZ5L3rGA8ALAuzJEA7MjSA2+McTLJ85P8ZZL/lORDSe5I8vwxxjeWPR4AWBfmSAB2aiXvojnG+FqSl63idwPAOjNHArATq7hEEwAAgF0g8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmlho4FXVc6tqbPH1tzPHHayq91bVvVV1sqo+WVVPXuRYAGBdmB8BWJZ9u3TeNyT5/NTPpzb/o6oqyZEkVyZ5fZL7ktyQ5NNV9ZQxxl27NCYAWDXzIwC7arcC78tjjM+d5r5rkzwnyfPHGJ9Okqo6luSOJL+YjckPADoyPwKwq1axB+/aJF/fnLySZIxxf5LfT/LSFYwHANaB+RGAHdutwPtQVX23qv5PVf3nqrp86r4nJfnSFo85nuTyqrpkl8YEAKtmfgRgVy36Es37k/xGkqNJ/i7JU5O8OcmxqnrqGON/J7k0yV9v8dgTk+8Hk3xjweMCgFUyPwKwFAsNvDHGnyf586mbjlbVZ5L8WTb2DvxSkkoytnh4nencVXU4yeEkOZCLFjJeAFiGZc2P+y85uJDxArB37foevDHGbUn+MsnTJzedyMZfKWdtzkr3neY8N48xDo0xDu3PhYsfKAAs0W7Mj/sOXLz4gQKwpyzrTVam/yp5PBv7DGZdleRrYwyXnwBwvjA/ArBQux54VXUoyT9O8qeTm44kuayqrpk65pFJXjK5DwDaMz8CsBsWugevqj6Ujc/ruS3J32ZjE/kNSf5nkt+cHHYkybEkH6yqN+V7H+RaSX51keM5X917+FlzHffom4/t8kgASMyPACzPot9F80tJfirJ65NclOR/Jflokn8zxrg3ScYYD1XVi5P8epJ3JTmQjQnteWOMOxc8HgBYB+ZHAJZi0e+ieVOSm+Y47kSS6yZfANCa+RGAZVnWm6wAAACwyxZ9ieZSnHr0xbn3ZWffZ3Y+7TGbd9/dVo85n14nAADozAoeAABAEwIPAACgCYEHAADQxJ7cgzevc9mXtmnd96Xt5Lmd7Vzr/twBAICtWcEDAABoQuABAAA00foSzZ1Y5CWQrK+t/p27XqLa+ZJlAAA2WMEDAABoQuABAAA0IfAAAACasAeP/8/5vv/wdM9/3n1oHV+/nb4m6+hs/057+bkBAOcvK3gAAABNCDwAAIAmBB4AAEAT9uDBnDrurdupM70m67iHbTv/hvboAQB7kRU8AACAJgQeAABAEwIPAACgCXvwgF2x3T2Lu7mnbTf2T+7Wnsy9trdvt/emnvrI53b1/ADQjRU8AACAJqzgAWthEe/I2eGdTtfx3Ts7vK4AcL6wggcAANCEwAMAAGhC4AEAADRhDx6w9uwB+555X4t59up5XQGgHyt4AAAATQg8AACAJlyiCdCQyy8B4PxkBQ8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJvategDnog6eyv6fuCff+S+PSZLs/4l7kuT//byVzWPO1ZnODQAAsA6s4AEAADQh8AAAAJrYk5dobpq97HKnl2Fu53dtZZGXcc7+vnM59268HtsdxzyXzwIAAIthBQ8AAKAJgQcAANCEwAMAAGhiT+/BWzen2/M2u//sXPbGnW1P3m7uP1zE75n+WAsAAGB3WMEDAABoQuABAAA0IfAAAACasAdvCXZjf9yy9twt0ryfiTfvXkYAAODhrOABAAA0YQWPpdvJO3GeznZX9xaxAmpFEQCAdWMFDwAAoAmBBwAA0ITAAwAAaMIePFpYxbuKzvM7u+7Ts4cRAGA9WcEDAABoQuABAAA0IfAAAACasAcPdtHsXrV133e2zL2M2/ldy3jdNsez7v9GAABnYgUPAACgCYEHAADQhEs0YYkWccnmKj4SYtXW4dLRZV26ue7/vi5hBYD1ZgUPAACgCYEHAADQhMADAABowh48WKF132/F92xn/2Tnf9d5n5u9egCwGlbwAAAAmhB4AAAATQg8AACAJuzBAzgHnffZLcI8r499egCweFbwAAAAmhB4AAAATQg8AACAJuzBA2AltvPZggDAfOZawauqx1bVb1bVsar6ZlWNqrpii+MOVtV7q+reqjpZVZ+sqidvcdyBqvq1qrq7qh6YnPeHd/50AGB5zI8ArJt5L9F8QpKfTHJfks9udUBVVZIjSV6Y5PVJXpZkf5JPV9VjZw5/X5LXJfnlJC9OcneSj1fVU7b7BABghcyPAKyVeS/R/MwY4weTpKpem+RHtzjm2iTPSfL8McanJ8ceS3JHkl9M8obJbVcneVWS68YYH5jcdjTJ8SRvm5wHAPYC8yMAa2WuwBtjPDTHYdcm+frm5DV53P1V9ftJXprJBDY57jtJbpk67lRVfTjJv66qC8cYD877BADoYavPzqtPnVrBSOZnfgRg3SzyXTSflORLW9x+PMnlVXXJ1HF3jDG+ucVxF2TjchcA6ML8CMDSLDLwLs3GHoRZJybfD8553KULHBMArJr5EYClWWTgVZJxmtvP5biH31l1uKpurapbT90/+8dNAFhby5sfv3XyHIcIQBeLDLwT2fqvi5t/mbxvzuNObHFfxhg3jzEOjTEO7fv+i3Y0UABYouXNjwcu3tFAAdj7Fhl4x7Oxf2DWVUm+Nsb4xtRxV1bVbKVdleTbSb66wDEBwKqZHwFYmkUG3pEkl1XVNZs3VNUjk7xkct/0cfuTvGLquH1JXpnkE94hDIBmzI8ALM28n4OXqnr55D+fNvn+oqq6J8k9Y4yj2ZiYjiX5YFW9KRuXnNyQjb0Dv7p5njHG7VV1S5J3VtX+bHwO0PVJrkzy6h0+HwBYKvMjAOtk7sBL8jszP79r8v1okueOMR6qqhcn+fXJfQeyMaE9b4xx58xjX5PkHUnenuRRSb6Y5IVjjNu2OX4AWD
XzIwBrY+7AG2Oc8V28JsecSHLd5OtMxz2Q5I2TLwDYs8yPAKyTRe7BAwAAYIUEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAE3MFXlU9tqp+s6qOVdU3q2pU1RUzx1wxuX2rr0fNHHugqn6tqu6uqgcm5/3hxT0tANh95kcA1s28K3hPSPKTSe5L8tmzHHtTkmfNfP39zDHvS/K6JL+c5MVJ7k7y8ap6ypzjAYB1YH4EYK3sm/O4z4wxfjBJquq1SX70DMf+1Rjjc6e7s6quTvKqJNeNMT4wue1okuNJ3pbk2jnHBACrZn4EYK3MtYI3xnhogb/z2iTfSXLL1PlPJflwkhdU1YUL/F0AsGvMjwCsm914k5WbqupUVd1fVUeq6skz9z8pyR1jjG/O3H48yQXZuNwFALoxPwKw6+a9RHMeDyZ5T5JPJLknyQ8leXOSP6mqZ4wxvjw57tJs7FWYdWLqfgDowvwIwNIsLPDGGHcn+dmpmz5bVX+Qjb88viXJT09uryRji1PUmc5fVYeTHE6SC37gkTseLwAswzLnx/2XHNzxeAHY23b1c/DGGHcm+eMkT5+6+US2/ivkwan7tzrXzWOMQ2OMQ/u+/6LFDhQAlmjX5scDFy92oADsOcv4oPPZv0geT3JlVc1W2lVJvp3kq0sYEwCsmvkRgIXb1cCrqsuTPDvJn07dfCTJ/iSvmDpuX5JXJvnEGOPB3RwTAKya+RGA3TL3HryqevnkP582+f6iqronyT1jjKNV9RvZCMZj2dhE/sQkNyR5KMmvbJ5njHF7Vd2S5J1VtT/JHUmuT3Jlklfv8PkAwFKZHwFYJ9t5k5Xfmfn5XZPvR5M8NxuXllyf5GeSPCLJvUk+leStY4yvzDz2NUnekeTtSR6V5ItJXjjGuG0b4wGAdWB+BGBtzB14Y4wzvovXGOP9Sd4/57keSPLGyRcA7FnmRwDWyTLeZAUAAIAlEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE2cNfCq6uVV9ZGq+puqeqCqvlJVN1XVI2aOO1hV762qe6vqZFV9sqqevMX5DlTVr1XV3ZPzHauqH17kkwKAZTBHArBu5lnB+4Uk303y5iQvTPLuJNcn+cOq+r4kqapKcmRy/+uTvCzJ/iSfrqrHzpzvfUlel+SXk7w4yd1JPl5VT9nxswGA5TJHArBW9s1xzEvGGPdM/Xy0qk4k+e0kz03yqSTXJnlOkuePMT6dJFV1LMkdSX4xyRsmt12d5FVJrhtjfGBy29Ekx5O8bXIeANgrzJEArJWzruDNTFybPj/5ftnk+7VJvr45cU0ed3+S30/y0qnHXZvkO0lumTruVJIPJ3lBVV24rdEDwAqZIwFYN+f6JivXTL5/efL9SUm+tMVxx5NcXlWXTB13xxjjm1scd0GSJ5zjeABgXZgjAViZbQdeVV2WjUtFPjnGuHVy86VJ7tvi8BOT7wfnPO7S7Y4HANaFORKAVdtW4E3+yvixJKeSvGb6riRjq4ds8fM8x231uw9X1a1Vdeup+2f/uAkAq7WqOfJh8+O3Tm5jxAB0NHfgVdWBbLwL2OOTvGCMcdfU3Sey9V8WN/8qed+cx53Y4r4kyRjj5jHGoTHGoX3ff9G8wwaAXbfKOfJh8+OBi7c9dgB6mSvwqmp/ko8keUaSHxtj/MXMIcezsXdg1lVJvjbG+MbUcVdW1WyhXZXk20m+Ou/AAWAdmCMBWCfzfND59yX5UJIfSfLSMcbntjjsSJLLquqaqcc9MslLJvdNH7c/ySumjtuX5JVJPjHGePBcngQArII5EoB1M8/n4P1WNiabdyQ5WVXPnLrvrsllKEeSHEvywap6UzYuN7khG/sGfnXz4DHG7VV1S5J3Tv7ieUc2PhD2yiSvXsDzAYBlMkcCsFbmuUTzRZPvb8nGBDX99dokGWM8lOTFSf4wybuS/F6S7yZ53hjjzpnzvSbJB5K8Pcl/TfK4JC8cY9y2o2cCAMtnjgRgrZx1BW+MccU8JxpjnEhy3eTrTMc9kOSNky8A2LPMkQCsm3P9oHMAAADWjMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAoAmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAACgCYEHAADQhMADAABoQuABAAA0IfAAAACaEHgAAABNCDwAAIAmBB4AAEATAg8AAKAJgQcAANCEwAMAAGhC4AEAADQh8AAAAJoQeAAAAE0IPAAAgCYEHgAAQBMCDwAAo
AmBBwAA0ITAAwAAaELgAQAANCHwAAAAmhB4AAAATQg8AACAJgQeAABAEwIPAJjbo28+lkfffGzVwwDgNAQeAABAE/tWPYBz8U/+wX353FN+d9XDmNszb3/5qocAAAtx7+FnrXoIAJyBFTwAAIAmBB4ANGffHMD5Q+ABAAA0sSf34O0157Jf0L49ABbFvjmA84cVPAAAgCYEHgAAQBMCDwAAoAl78NbU2fbt2aMHAADMsoIHAADQhMADAM5LX7jx3fnCje+e+/Z5+MxBYNUEHgAAQBP24O1R83y2nn16AJyvNlfRzvQZgE+78fpt3T4PnzkIrJoVPAAAgCas4DU2zyrfmVgBBGCvOpeVtHlW/QDWnRU8AACAJqzgAQBksSt3VgOBVbGCBwAA0IQVPE5rp3v41pF9hQAsw/mwcrf5WYGb7zo6+/PpPPrmY+fF6wOrIvAAADir2ctOX/APn7Jxx+GNb/OGnriD3eUSTQAAgCas4HFe6XjZ6aadXH56rq/Lqi553cm/o8t0gS4W+UYus+fa6txn+z2bj3lazv2D4k/ndKuCe/Fyz3kvZYVzZQUPAACgCSt4AAB70CJXrmbPNc+5z+Ux85pd5dor+/nOtKp6rit3W62u7ubz9hEfe58VPAAAgCZqjLHqMWzboasPjD/7+ONWPQzYE86052zRexJ3a3/bbu6dtCdvvR1/w3/Myb+8u1Y9jr3iosc8bjzxZT+36mHAwsyzN3CdrPv46OX29/z8F8YYh2Zvt4IHAADQhD14AACspZ3s8zvdatpOVtnO9tjT3b6TfXObv3P6d1gp5Eys4AEAADRhDx7Aitj/93Bb7bV8xgvuzK1f/JY9eHOyBw/mt66rYOs6rtPZaoVx9r698lz2GnvwAAAAmrMHDwCA8866riqt67hmzbM6t1eeSzdW8AAAAJo46wpeVb08yU8lOZTkB5J8LclHk/zKGOPvJ8dckeSO05zi4Bjjb6fOdyDJv03y00keleT2JP9qjPGZc34WAHvQdj7fr+N+vd38fMNlMUcC56tlrs7N8y6kX7jx3UmSp914/TKGtNbmuUTzF7IxYb05yV1JnprkxiTPq6p/PsZ4aOrYm5IcmXn838/8/L4kP57kTUn+Ksm/TPLxqnrWGOP2bT8DAFgdcyQAa2WewHvJGOOeqZ+PVtWJJL+d5LlJPjV131+NMT53uhNV1dVJXpXkujHGBya3HU1yPMnbkly7veEDwEqZIwF22TyrhVbuvuese/BmJq5Nn598v2ybv+/aJN9JcsvU+U8l+XCSF1TVhds8HwCsjDkSgHVzru+iec3k+5dnbr+pqv59kpNJjiZ5yxjjL6buf1KSO8YY35x53PEkFyR5wuS/AZiyyv1qi9j/12G/3TaYIwGWZK981t4yx7ntwKuqy7Jxqcgnxxi3Tm5+MMl7knwiyT1Jfigb+xH+pKqeMcbYnOQuTXLfFqc9MXU/AOxJ5kiA5Vr3sNu0zHFuK/Cq6pIkH0tyKslrNm8fY9yd5GenDv1sVf1BNv7S+JZsvBtYklSSsdWp5/jdh5McTpLLL/PxfQCsl1XNkdPz4/5LDp7r8AFoYu5Smrx185Ekj09yzRjjrjMdP8a4s6r+OMnTp24+keTyLQ4/OHX/6c53c5Kbk+TQ1Qe2mgAB2AXn2eWV52SVc+T0/HjRYx5nfgRYgc1LMGetYoVxrg86r6r9ST6S5GzSesAAAAcUSURBVBlJfmxmz8AZH5qH/zXyeJIrq+qimeOuSvLtJF+d87wAsBbMkQCsk3k+6Pz7knwoyY8k+fEzvcXzzOMuT/LsJL83dfORJG9N8opsvIV0qmpfklcm+cQY48FtjR4AVsgcCUCyXnsB57lE87eyMdm8I8nJqnrm1H13jTHuqqrfyMZq4LFsbCB/YpIbkjyU5Fc2Dx5j3F5VtyR55+QvnnckuT7JlUlevYDnAwDLZI4EYK3ME3gvmnx/y+Rr2luT3JiNy0quT/IzSR6R5N5sfLjrW8cYX5l5zGuyMRG+PcmjknwxyQvHGLdtf/gAsFLmSADOaq0+JmGMccUcx7w/yfvn+YVjjAeSvHHyBQB7ljkSgHXj8wYAAAB20TL36M31LpoAAACsvxpj731kTlXdk+RkNvYxsHyPjtd+Vbz2q+O1X41/NMZ4zKoHsVeYH1fO/0+sjtd+dbz2q7PlHLknAy9JqurWMcahVY/jfOS1Xx2v/ep47dkr/G91dbz2q+O1Xx2v/fpxiSYAAEATAg8AAKCJvRx4N696AOcxr/3qeO1Xx2vPXuF/q6vjtV8dr/3qeO3XzJ7dgwcAAPB/27u3EKuqOI7j35/Y/WLjW6QyhmUoaUY3E8pIsIeyh24vRRQGFVQQ9KBFLz34EPZQT0X1phhUxPRSUplZ2QWUIgtDmEjISBtvNWaO/XvYe3B7ODp75Mzea29/H1g4rLM8rPPfHn+z9ln7bDtekz/BMzMzMzMzs4LGLPAkTZf0tqT9kg5IelfSjLrn1TaSFkuKLm1fx7g+Sa9L2iPpb0kfSbqyrnk3jaRpkl6RtFnScF7j/i7jStVZ0tmSXpS0S9Kh/HlvquK1NE2Z2kvqP8H7ICRd1DHWtbfaOSMnnvOxOs7I+jgj26ERCzxJ5wKfAFcADwIPAJcBGySdV+fcWuxJYGGhLRl9QJKAAeA24AngLuAMsuMxrfqpNtIs4F5gL7Cp24Bx1vkN4BHgeeB2YBfwoaSrJmT2zTZm7QtWcfz7YCFwsGOMa2+1ckZWzvk48ZyR9XFGtkFEJN+Ap4CjwKxC30xgBHi67vm1qQGLgQCWnGTMnfmYWwp9U4Ah4OW6X0MTGjCp8PPyvJ79p1JnYH4+7qFC32RgOzBQ92tNrZWsfX/ev3yM53Lt3WpvzsjK6ux8rK7Wzsi0a++MTLw14hM8YBnwVUTsGO2IiEHgC7I3uFVrGfBbRGwY7YiI/cD7+HiUEhH/lRhWts7LgCPAW4VxI8A6YKmks3oy6ZYoWfuyXHtLgTMyHc7HHnBG1scZ2Q5NWeDNBX7o0r8NmFPxXE4XayQdlfSnpLUd13Kc7HjMkHR+NVNsvbJ1ngsMRsRwl3Fnkm23sFOzStJIfl3TQJdrO1x7S4EzslrOxzQ4I+vnjEzU5LonUNJUsr3AnYaAvorn0nb7gdXARuAAsABYCWyWtCAi/iA7Hr90+btD+Z99wF8TP9XWK1vnk70/Rp/Hxucw8CqwHthNdm3TSuBLSddFxE/5ONfeUuCMrIbzMS3OyPo4IxPXlAUeZHt4O6nyWbRcRGwFtha6Nkr6DPiG7MLy58jq7uMx8crW2cejxyJiF/BooWuTpA/Izjo+C9yf97v2lgr/O5xgzsfkOCNr4oxMX1O2aO6l+yq/j+5nBqyHImIL8DNwbd41xImPB/iY9ErZOo81bqjLYzZOEbET+Jxj7wNw7S0NzsiaOB9r5YxMiDMyLU1Z4G0j28fbaQ7wY8VzOV0Vz8Kc7Hj8GhHeftIbZeu8DZiZf1V657h/gR1Yr3SejXTtLQXOyHo5H+vhjEyPMzIRTVngDQA3SLp0tCO/6eKi/DGb
QJKuAS4Hvs67BoBLJN1cGHMhcAc+Hr1Uts4DZPf+uacwbjJwH7A+Ig5XM912y79IYRHH3gfg2lsanJE1cT7WyhmZEGdkWhTRbWtsWvIbtX4HHCLb4x7AC8AFwDyfEesdSWuAQWALsI/sIvIVwDBwdUTskTSJ7GP46cAzZNsgVgDzgPn5x/Q2Bkl35z/eSraX/XGyi5V3R8TG8dRZ0jpgaT5uEHiM7IaiN+ZbiKygRO1Xk50A25z3zyar/RTg+ojYXngu195q5YyshvOxWs7I+jgjW6DuG/GVbcAM4B2yb646CLxHx40X3XpS5xXA92TfFnYE2Am8BlzcMW4q8CbZ/ulh4GOy/1Brfw1NaWS/hHVrn463zsA5wEvA78A/ZGfQFtf9GlNtY9UeeBj4luwXhpG8rmuB2a69W4rNGVlJjZ2P1dbbGZlo7Z2R6bdGfIJnZmZmZmZmY2vKNXhmZmZmZmY2Bi/wzMzMzMzMWsILPDMzMzMzs5bwAs/MzMzMzKwlvMAzMzMzMzNrCS/wzMzMzMzMWsILPDMzMzMzs5bwAs/MzMzMzKwlvMAzMzMzMzNrif8Bn0vF5CQbnZMAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "fig = plt.figure(figsize=(15,50))\n", + "\n", + "nplot = min(N_EVALUATE, 10)\n", + "for idx in range(nplot):\n", + " # plot actual\n", + " plt.subplot(nplot, 2, 2*(idx+1)-1)\n", + " plt.imshow(results[idx][0])\n", + " # plot predicted\n", + " plt.subplot(nplot, 2, 2*(idx+1))\n", + " plt.imshow(results[idx][1])\n", + "\n", + "f_axes = fig.axes\n", + "_ = f_axes[0].set_title('Actual')\n", + "_ = f_axes[1].set_title('Predicted') " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "celltoolbar": "Tags", + "kernelspec": { + "display_name": "seismic-interpretation", + "language": "python", + "name": "seismic-interpretation" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/interpretation/notebooks/HRNet_Penobscot_demo_notebook.ipynb b/examples/interpretation/notebooks/HRNet_Penobscot_demo_notebook.ipynb new file mode 100644 index 00000000..cf6366fd --- /dev/null +++ b/examples/interpretation/notebooks/HRNet_Penobscot_demo_notebook.ipynb @@ -0,0 +1,654 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Copyright (c) Microsoft Corporation.\n", + "\n", + "Licensed under the MIT License." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# HRNet training and validation on numpy dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this notebook, we demonstrate how to train an HRNet model for facies prediction using [Penobscot](https://zenodo.org/record/1341774#.XepaaUB2vOg) dataset. The Penobscot 3D seismic dataset was acquired in the Scotian shelf, offshore Nova Scotia, Canada. Please refer to the top-level [README.md](../../../README.md) file to download and prepare this dataset for the experiments. \n", + "\n", + "The data expected in this notebook needs to be in the form of two 3D numpy arrays. One array will contain the seismic information, the other the mask. The network will be trained to take a 2D patch of data from the seismic block and learn to predict the 2D mask patch associated with it." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Environment setup\n", + "\n", + "To set up the conda environment, please follow the instructions in the top-level [README.md](../../../README.md) file.\n", + "\n", + "__Note__: To register the conda environment in Jupyter, run:\n", + "`python -m ipykernel install --user --name envname`\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Library imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import logging\n", + "import logging.config\n", + "from os import path\n", + "\n", + "import cv2\n", + "import numpy as np\n", + "import yacs.config\n", + "import torch\n", + "from albumentations import Compose, HorizontalFlip, Normalize, PadIfNeeded, Resize\n", + "from cv_lib.utils import load_log_configuration\n", + "from cv_lib.event_handlers import (\n", + " SnapshotHandler,\n", + " logging_handlers,\n", + " tensorboard_handlers,\n", + ")\n", + "from cv_lib.event_handlers.logging_handlers import Evaluator\n", + "from cv_lib.event_handlers.tensorboard_handlers import (\n", + " create_image_writer,\n", + " create_summary_writer,\n", + ")\n", + "from cv_lib.segmentation import models, extract_metric_from\n", + "from cv_lib.segmentation.metrics import (\n", + " pixelwise_accuracy,\n", + " class_accuracy,\n", + " mean_class_accuracy,\n", + " class_iou,\n", + " mean_iou,\n", + ")\n", + "from cv_lib.segmentation.dutchf3.utils import (\n", + " current_datetime,\n", + " generate_path,\n", + " np_to_tb,\n", + ")\n", + "from cv_lib.segmentation.penobscot.engine import (\n", + " create_supervised_evaluator,\n", + " create_supervised_trainer,\n", + ")\n", + "from deepseismic_interpretation.penobscot.data import PenobscotInlinePatchDataset\n", + "from deepseismic_interpretation.dutchf3.data import decode_segmap\n", + "from ignite.contrib.handlers import CosineAnnealingScheduler\n", + "from ignite.engine import Events\n", + "from ignite.metrics import Loss\n", + "from ignite.utils import convert_tensor\n", + "from toolz import compose\n", + "from torch.utils import data\n", + "from itkwidgets import view\n", + "from utilities import plot_aline\n", + "from toolz import take\n", + "\n", + "\n", + "mask_value = 255\n", + "_SEG_COLOURS = np.asarray(\n", + " [[241, 238, 246], [208, 209, 230], [166, 189, 219], [116, 169, 207], [54, 144, 192], [5, 112, 176], [3, 78, 123]]\n", + ")\n", + "\n", + "# experiment configuration file\n", + "CONFIG_FILE = \"./configs/hrnet.yaml\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def _prepare_batch(batch, device=None, non_blocking=False):\n", + " x, y, ids, patch_locations = batch\n", + " return (\n", + " convert_tensor(x, device=device, non_blocking=non_blocking),\n", + " convert_tensor(y, device=device, non_blocking=non_blocking),\n", + " ids,\n", + " patch_locations,\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Experiment configuration file\n", + "We use configuration files to specify experiment configuration, such as hyperparameters used in training and evaluation, as well as other experiment settings. We provide several configuration files for this notebook, under `./configs`, mainly differing in the DNN architecture used for defining the model.\n", + "\n", + "Modify the `CONFIG_FILE` variable above if you would like to run the experiment using a different configuration file." 
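The next cell loads this YAML file with yacs. If you only want to tweak an option or two for a quick run, a yacs `CfgNode` can also be overridden in memory instead of editing the file; a minimal sketch, assuming the key structure of the bundled `./configs/hrnet.yaml`:

```python
import yacs.config

# Minimal sketch: load the experiment config and override two options in
# memory. The keys assume the structure of ./configs/hrnet.yaml.
with open("./configs/hrnet.yaml", "rt") as f_read:
    cfg = yacs.config.load_cfg(f_read)

cfg.merge_from_list(["TRAIN.END_EPOCH", 1, "TRAIN.BATCH_SIZE_PER_GPU", 8])
cfg.freeze()  # optional: guard against accidental edits later in the run
print(cfg.TRAIN.END_EPOCH, cfg.TRAIN.BATCH_SIZE_PER_GPU)
```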
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with open(CONFIG_FILE, \"rt\") as f_read:\n", + " config = yacs.config.load_cfg(f_read)\n", + "\n", + "print(f'Configuration loaded. Please check that the DATASET.ROOT:{config.DATASET.ROOT} points to your data location.')\n", + "print(f'To modify any of the options, please edit the configuration file {CONFIG_FILE} and reload. \\n')\n", + "print(config)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Parameters" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "parameters" + ] + }, + "outputs": [], + "source": [ + "# Maximum number of batches (iterations) to run in training or validation\n", + "# Setting to None will run the whole dataset\n", + "# Useful for integration tests with a setting of something like 3;\n", + "# use only if you want to check things are running and don't want to run\n", + "# through the whole dataset\n", + "max_iterations = None \n", + "# The number of epochs to run in training\n", + "max_epochs = config.TRAIN.END_EPOCH \n", + "max_snapshots = config.TRAIN.SNAPSHOTS\n", + "dataset_root = config.DATASET.ROOT" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load Dataset" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from toolz import pipe\n", + "import glob\n", + "from PIL import Image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "image_dir = os.path.join(dataset_root, \"inlines\")\n", + "mask_dir = os.path.join(dataset_root, \"masks\")\n", + "\n", + "image_iter = pipe(os.path.join(image_dir, \"*.tiff\"), glob.iglob,)\n", + "\n", + "_open_to_array = compose(np.array, Image.open)\n", + "\n", + "\n", + "def open_image_mask(image_path):\n", + " return pipe(image_path, _open_to_array)\n", + "\n", + "\n", + "def _mask_filename(imagepath):\n", + " file_part = os.path.splitext(os.path.split(imagepath)[-1].strip())[0]\n", + " return os.path.join(mask_dir, file_part + \"_mask.png\")\n", + "\n", + "\n", + "image_list = sorted(list(image_iter))\n", + "image_list_array = [_open_to_array(i) for i in image_list]\n", + "mask_list_array = [pipe(i, _mask_filename, _open_to_array) for i in image_list]\n", + "mask = np.stack(mask_list_array, axis=0)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's visualize the dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "view(mask, slicing_planes=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's view a slice of the data along the inline direction." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "idx = 100\n", + "x_in = image_list_array[idx]\n", + "x_inl = mask_list_array[idx]\n", + "\n", + "plot_aline(x_in, x_inl, xlabel=\"inline\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Model training" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Setup logging\n", + "load_log_configuration(config.LOG_CONFIG)\n", + "logger = logging.getLogger(__name__)\n", + "logger.debug(config.WORKERS)\n", + "scheduler_step = max_epochs // max_snapshots\n", + "torch.backends.cudnn.benchmark = config.CUDNN.BENCHMARK\n", + "\n", + "torch.manual_seed(config.SEED)\n", + "if torch.cuda.is_available():\n", + " torch.cuda.manual_seed_all(config.SEED)\n", + "np.random.seed(seed=config.SEED)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Set up data augmentation\n", + "\n", + "Let's define our data augmentation pipeline, which includes basic transformations, such as _data normalization, resizing, and padding_ if necessary.\n", + "The padding is carried out twice because, if we split an inline or crossline slice into multiple patches, some of these patches will be at the edge of the slice and may not contain a full patch worth of data. To compensate for this and keep all patches in a batch the same size (a requirement), we need to pad them.\n", + "So our basic augmentation is:\n", + "- Normalize\n", + "- Pad if needed to initial size\n", + "- Resize to a larger size\n", + "- Pad further if necessary" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Setup Augmentations\n", + "basic_aug = Compose(\n", + " [\n", + " Normalize(mean=(config.TRAIN.MEAN,), std=(config.TRAIN.STD,), max_pixel_value=config.TRAIN.MAX,),\n", + " PadIfNeeded(\n", + " min_height=config.TRAIN.PATCH_SIZE,\n", + " min_width=config.TRAIN.PATCH_SIZE,\n", + " border_mode=cv2.BORDER_CONSTANT,\n", + " always_apply=True,\n", + " mask_value=mask_value,\n", + " value=0,\n", + " ),\n", + " Resize(config.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT, config.TRAIN.AUGMENTATIONS.RESIZE.WIDTH, always_apply=True,),\n", + " PadIfNeeded(\n", + " min_height=config.TRAIN.AUGMENTATIONS.PAD.HEIGHT,\n", + " min_width=config.TRAIN.AUGMENTATIONS.PAD.WIDTH,\n", + " border_mode=cv2.BORDER_CONSTANT,\n", + " always_apply=True,\n", + " mask_value=mask_value,\n", + " value=0,\n", + " ),\n", + " ]\n", + ")\n", + "if config.TRAIN.AUGMENTATION:\n", + " train_aug = Compose([basic_aug, HorizontalFlip(p=0.5)])\n", + " val_aug = basic_aug\n", + "else:\n", + " train_aug = val_aug = basic_aug" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load the data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For training the model, we will use a patch-based approach. Rather than using entire sections (crosslines or inlines) of the data, we extract a large number of small patches from the sections, and use the patches as our data. This allows us to generate a larger set of images for training, and is also a more feasible approach for large seismic volumes.\n", + "\n", + "We are using a custom patch data loader from our __`deepseismic_interpretation`__ library for generating and loading patches from seismic section data." 
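As a rough sanity check on this patch-based approach, the number of full (non-padded) sliding windows per section follows directly from the patch size and stride. A back-of-the-envelope sketch using the values in the bundled hrnet.yaml (PATCH_SIZE 128, STRIDE 64, 1501 x 481 inline sections); the loader's exact count may differ slightly depending on its edge handling:

```python
# Count full sliding windows of size `patch` stepping by `stride` along one axis.
def n_windows(extent: int, patch: int = 128, stride: int = 64) -> int:
    return (extent - patch) // stride + 1

rows, cols = n_windows(1501), n_windows(481)
print(rows, cols, rows * cols)  # 22 x 6 = 132 full patches per inline section
```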
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [], + "source": [ + "train_set = PenobscotInlinePatchDataset(\n", + " dataset_root,\n", + " config.TRAIN.PATCH_SIZE,\n", + " config.TRAIN.STRIDE,\n", + " split=\"train\",\n", + " transforms=train_aug,\n", + " n_channels=config.MODEL.IN_CHANNELS,\n", + " complete_patches_only=config.TRAIN.COMPLETE_PATCHES_ONLY,\n", + ")\n", + "\n", + "val_set = PenobscotInlinePatchDataset(\n", + " dataset_root,\n", + " config.TRAIN.PATCH_SIZE,\n", + " config.TRAIN.STRIDE,\n", + " split=\"val\",\n", + " transforms=val_aug,\n", + " n_channels=config.MODEL.IN_CHANNELS,\n", + " complete_patches_only=config.VALIDATION.COMPLETE_PATCHES_ONLY,\n", + ")\n", + "\n", + "logger.info(train_set)\n", + "logger.info(val_set)\n", + "\n", + "n_classes = train_set.n_classes\n", + "train_loader = data.DataLoader(\n", + " train_set, batch_size=config.TRAIN.BATCH_SIZE_PER_GPU, num_workers=config.WORKERS, shuffle=True,\n", + ")\n", + "\n", + "val_loader = data.DataLoader(val_set, batch_size=config.VALIDATION.BATCH_SIZE_PER_GPU, num_workers=config.WORKERS,)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Set up model training\n", + "Next, let's define a model to train, an optimization algorithm, and a loss function.\n", + "\n", + "Note that the model is loaded from our __`cv_lib`__ library, using the name of the model as specified in the configuration file. To load a different model, either change the `MODEL.NAME` field in the configuration file, or create a new one corresponding to the model you wish to train." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model = getattr(models, config.MODEL.NAME).get_seg_model(config)\n", + "\n", + "device = \"cpu\"\n", + "if torch.cuda.is_available():\n", + " device = \"cuda\"\n", + "model = model.to(device) # Send to the selected device (GPU if available)\n", + "\n", + "optimizer = torch.optim.SGD(\n", + " model.parameters(), lr=config.TRAIN.MAX_LR, momentum=config.TRAIN.MOMENTUM, weight_decay=config.TRAIN.WEIGHT_DECAY,\n", + ")\n", + "\n", + "output_dir = generate_path(config.OUTPUT_DIR, config.MODEL.NAME, current_datetime(),)\n", + "summary_writer = create_summary_writer(log_dir=path.join(output_dir, config.LOG_DIR))\n", + "snapshot_duration = scheduler_step * len(train_loader)\n", + "scheduler = CosineAnnealingScheduler(optimizer, \"lr\", config.TRAIN.MAX_LR, config.TRAIN.MIN_LR, snapshot_duration)\n", + "\n", + "criterion = torch.nn.CrossEntropyLoss(ignore_index=mask_value, reduction=\"mean\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Training the model\n", + "We use the [ignite](https://pytorch.org/ignite/index.html) framework to create the training and validation loops in our codebase. Ignite provides an easy way to create compact training/validation loops without too much boilerplate code.\n", + "\n", + "In this notebook, we demonstrate the use of ignite on the training loop only. We create a training engine `trainer` that loops multiple times over the training dataset and updates model parameters. 
In addition, we add various events to the trainer, using an event system that allows us to interact with the engine at each step of the run, such as when the trainer is started/completed, when an epoch is started/completed, and so on.\n", + "\n", + "In the cell below, we use event handlers to add the following events to the training loop:\n", + "- log training output\n", + "- log and schedule the learning rate, and\n", + "- periodically save the model to disk." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "trainer = create_supervised_trainer(model, optimizer, criterion, _prepare_batch, device=device)\n", + "\n", + "trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)\n", + "\n", + "trainer.add_event_handler(\n", + " Events.ITERATION_COMPLETED, logging_handlers.log_training_output(log_interval=config.PRINT_FREQ),\n", + ")\n", + "trainer.add_event_handler(Events.EPOCH_STARTED, logging_handlers.log_lr(optimizer))\n", + "trainer.add_event_handler(\n", + " Events.EPOCH_STARTED, tensorboard_handlers.log_lr(summary_writer, optimizer, \"epoch\"),\n", + ")\n", + "trainer.add_event_handler(\n", + " Events.ITERATION_COMPLETED, tensorboard_handlers.log_training_output(summary_writer),\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def _select_pred_and_mask(model_out_dict):\n", + " return (model_out_dict[\"y_pred\"].squeeze(), model_out_dict[\"mask\"].squeeze())\n", + "\n", + "\n", + "evaluator = create_supervised_evaluator(\n", + " model,\n", + " _prepare_batch,\n", + " metrics={\n", + " \"pixacc\": pixelwise_accuracy(n_classes, output_transform=_select_pred_and_mask),\n", + " \"nll\": Loss(criterion, output_transform=_select_pred_and_mask),\n", + " \"cacc\": class_accuracy(n_classes, output_transform=_select_pred_and_mask),\n", + " \"mca\": mean_class_accuracy(n_classes, output_transform=_select_pred_and_mask),\n", + " \"ciou\": class_iou(n_classes, output_transform=_select_pred_and_mask),\n", + " \"mIoU\": mean_iou(n_classes, output_transform=_select_pred_and_mask),\n", + " },\n", + " device=device,\n", + ")\n", + "\n", + "if max_iterations is not None:\n", + " val_loader = take(max_iterations, val_loader)\n", + "\n", + "# Run validation at the completion of each training epoch\n", + "trainer.add_event_handler(Events.EPOCH_COMPLETED, Evaluator(evaluator, val_loader))\n", + "\n", + "evaluator.add_event_handler(\n", + " Events.EPOCH_COMPLETED,\n", + " logging_handlers.log_metrics(\n", + " \"Validation results\",\n", + " metrics_dict={\n", + " \"nll\": \"Avg loss :\",\n", + " \"pixacc\": \"Pixelwise Accuracy :\",\n", + " \"mca\": \"Avg Class Accuracy :\",\n", + " \"mIoU\": \"Avg Class IoU :\",\n", + " },\n", + " ),\n", + ")\n", + "evaluator.add_event_handler(\n", + " Events.EPOCH_COMPLETED,\n", + " tensorboard_handlers.log_metrics(\n", + " summary_writer,\n", + " trainer,\n", + " \"epoch\",\n", + " metrics_dict={\n", + " \"mIoU\": \"Validation/mIoU\",\n", + " \"nll\": \"Validation/Loss\",\n", + " \"mca\": \"Validation/MCA\",\n", + " \"pixacc\": \"Validation/Pixel_Acc\",\n", + " },\n", + " ),\n", + ")\n", + "\n", + "\n", + "def _select_max(pred_tensor):\n", + " return pred_tensor.max(1)[1]\n", + "\n", + "\n", + "def _tensor_to_numpy(pred_tensor):\n", + " return pred_tensor.squeeze().cpu().numpy()\n", + "\n", + "\n", + "transform_func = compose(np_to_tb, decode_segmap(n_classes=n_classes, label_colours=_SEG_COLOURS), 
_tensor_to_numpy,)\n", + "\n", + "transform_pred = compose(transform_func, _select_max)\n", + "\n", + "evaluator.add_event_handler(\n", + " Events.EPOCH_COMPLETED, create_image_writer(summary_writer, \"Validation/Image\", \"image\"),\n", + ")\n", + "evaluator.add_event_handler(\n", + " Events.EPOCH_COMPLETED,\n", + " create_image_writer(summary_writer, \"Validation/Mask\", \"mask\", transform_func=transform_func),\n", + ")\n", + "evaluator.add_event_handler(\n", + " Events.EPOCH_COMPLETED,\n", + " create_image_writer(summary_writer, \"Validation/Pred\", \"y_pred\", transform_func=transform_pred),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Checkpointing\n", + "Below we define the function that will save the best-performing models based on mean IoU." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def snapshot_function():\n", + " return (trainer.state.iteration % snapshot_duration) == 0\n", + "\n", + "\n", + "checkpoint_handler = SnapshotHandler(\n", + " path.join(output_dir, config.TRAIN.MODEL_DIR), config.MODEL.NAME, extract_metric_from(\"mIoU\"), snapshot_function,\n", + ")\n", + "evaluator.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {\"model\": model})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Start the training engine run." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if max_iterations is not None:\n", + " train_loader = take(max_iterations, train_loader)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "logger.info(\"Starting training\")\n", + "trainer.run(train_loader, max_epochs=max_epochs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Tensorboard\n", + "Using tensorboard for monitoring runs can be quite enlightening. Just ensure that the appropriate port is open on the VM so you can access it. Below we have the command for running tensorboard in your notebook. You can just as easily view it in a separate browser window by pointing the browser to the appropriate location and port." 
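One quick way to confirm that the port is actually reachable from the machine where you will open the browser is a plain socket probe; a small sketch using only the standard library (the host below is a placeholder, and 6007 matches the port used in the cell that follows):

```python
import socket

# Sketch: returns True if something is listening on host:port.
def port_open(host: str = "localhost", port: int = 6007) -> bool:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(1.0)
        return sock.connect_ex((host, port)) == 0

print(port_open())  # False until TensorBoard is started below
```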
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if max_epochs>1:\n", + " %load_ext tensorboard" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if max_epochs>1:\n", + " %tensorboard --logdir outputs --port 6007 --host 0.0.0.0" + ] + } + ], + "metadata": { + "celltoolbar": "Tags", + "kernelspec": { + "display_name": "seismic-interpretation", + "language": "python", + "name": "seismic-interpretation" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/interpretation/notebooks/configs/hrnet.yaml b/examples/interpretation/notebooks/configs/hrnet.yaml new file mode 100644 index 00000000..689f8c9c --- /dev/null +++ b/examples/interpretation/notebooks/configs/hrnet.yaml @@ -0,0 +1,109 @@ +CUDNN: + BENCHMARK: true + DETERMINISTIC: false + ENABLED: true +GPUS: (0,) +OUTPUT_DIR: 'outputs' +LOG_DIR: 'log' +WORKERS: 4 +PRINT_FREQ: 50 + +LOG_CONFIG: logging.conf +SEED: 2019 + + +DATASET: + NUM_CLASSES: 7 + ROOT: /mnt/penobscot + CLASS_WEIGHTS: [0.02630481, 0.05448931, 0.0811898 , 0.01866496, 0.15868563, 0.0875993 , 0.5730662] + INLINE_HEIGHT: 1501 + INLINE_WIDTH: 481 + + +MODEL: + NAME: seg_hrnet + IN_CHANNELS: 3 + PRETRAINED: '/data/hrnet_pretrained/image_classification/hrnetv2_w48_imagenet_pretrained.pth' + EXTRA: + FINAL_CONV_KERNEL: 1 + STAGE2: + NUM_MODULES: 1 + NUM_BRANCHES: 2 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + FUSE_METHOD: SUM + STAGE3: + NUM_MODULES: 4 + NUM_BRANCHES: 3 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + - 192 + FUSE_METHOD: SUM + STAGE4: + NUM_MODULES: 3 + NUM_BRANCHES: 4 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + - 192 + - 384 + FUSE_METHOD: SUM + +TRAIN: + COMPLETE_PATCHES_ONLY: True + BATCH_SIZE_PER_GPU: 32 + BEGIN_EPOCH: 0 + END_EPOCH: 300 + MIN_LR: 0.0001 + MAX_LR: 0.02 + MOMENTUM: 0.9 + WEIGHT_DECAY: 0.0001 + SNAPSHOTS: 5 + AUGMENTATION: True + DEPTH: "none" #"patch" # Options are none, patch and section + STRIDE: 64 + PATCH_SIZE: 128 + AUGMENTATIONS: + RESIZE: + HEIGHT: 256 + WIDTH: 256 + PAD: + HEIGHT: 256 + WIDTH: 256 + MEAN: [-0.0001777, 0.49, -0.0000688] # First value is for images, second for depth and then combination of both + STD: [0.14076 , 0.2717, 0.06286] + MAX: 1 + MODEL_DIR: "models" + + +VALIDATION: + BATCH_SIZE_PER_GPU: 128 + COMPLETE_PATCHES_ONLY: True + +TEST: + COMPLETE_PATCHES_ONLY: False + MODEL_PATH: "/data/home/mat/repos/DeepSeismic/experiments/segmentation/penobscot/local/output/penobscot/437970c875226e7e39c8109c0de8d21c5e5d6e3b/seg_hrnet/Sep25_144942/models/seg_hrnet_running_model_28.pth" + AUGMENTATIONS: + RESIZE: + HEIGHT: 256 + WIDTH: 256 + PAD: + HEIGHT: 256 + WIDTH: 256 diff --git a/examples/interpretation/notebooks/configs/patch_deconvnet_skip.yaml b/examples/interpretation/notebooks/configs/patch_deconvnet_skip.yaml new file mode 100644 index 00000000..52657ea0 --- /dev/null +++ b/examples/interpretation/notebooks/configs/patch_deconvnet_skip.yaml @@ -0,0 +1,59 @@ +CUDNN: + BENCHMARK: true + DETERMINISTIC: false + ENABLED: true +GPUS: (0,) +OUTPUT_DIR: 'output' +LOG_DIR: 'log' +WORKERS: 4 +PRINT_FREQ: 50 +LOG_CONFIG: logging.conf 
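+# SEED is used to seed numpy and torch in the training scripts for reproducibility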
+SEED: 2019 + +DATASET: + NUM_CLASSES: 6 + ROOT: /data/dutchf3 + CLASS_WEIGHTS: [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + +MODEL: + NAME: patch_deconvnet_skip + IN_CHANNELS: 1 + + +TRAIN: + BATCH_SIZE_PER_GPU: 64 + BEGIN_EPOCH: 0 + END_EPOCH: 100 + MIN_LR: 0.001 + MAX_LR: 0.02 + MOMENTUM: 0.9 + WEIGHT_DECAY: 0.0001 + SNAPSHOTS: 5 + AUGMENTATION: True + DEPTH: "none" #"patch" # Options are No, Patch and Section + STRIDE: 50 + PATCH_SIZE: 99 + AUGMENTATIONS: + RESIZE: + HEIGHT: 99 + WIDTH: 99 + PAD: + HEIGHT: 99 + WIDTH: 99 + MEAN: 0.0009997 # 0.0009996710808862074 + STD: 0.20977 # 0.20976548783479299 + MODEL_DIR: "models" + +VALIDATION: + BATCH_SIZE_PER_GPU: 512 + +TEST: + MODEL_PATH: '/data/home/mat/repos/DeepSeismic/examples/interpretation/notebooks/output/models/model_patch_deconvnet_skip_2.pth' + TEST_STRIDE: 10 + SPLIT: 'test1' # Can be both, test1, test2 + INLINE: True + CROSSLINE: True + POST_PROCESSING: + SIZE: 99 # + CROP_PIXELS: 0 # Number of pixels to crop top, bottom, left and right + diff --git a/examples/interpretation/notebooks/configs/unet.yaml b/examples/interpretation/notebooks/configs/unet.yaml new file mode 100644 index 00000000..d1efc6b5 --- /dev/null +++ b/examples/interpretation/notebooks/configs/unet.yaml @@ -0,0 +1,59 @@ +# UNet configuration + +CUDNN: + BENCHMARK: true + DETERMINISTIC: false + ENABLED: true +GPUS: (0,) +OUTPUT_DIR: 'output' +LOG_DIR: 'log' +WORKERS: 4 +PRINT_FREQ: 50 +LOG_CONFIG: logging.conf +SEED: 2019 + + +DATASET: + NUM_CLASSES: 6 + ROOT: '/data/dutchf3' + CLASS_WEIGHTS: [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + +MODEL: + NAME: resnet_unet + IN_CHANNELS: 3 + + +TRAIN: + BATCH_SIZE_PER_GPU: 16 + BEGIN_EPOCH: 0 + END_EPOCH: 10 + MIN_LR: 0.001 + MAX_LR: 0.02 + MOMENTUM: 0.9 + WEIGHT_DECAY: 0.0001 + SNAPSHOTS: 5 + AUGMENTATION: True + DEPTH: "section" # Options are No, Patch and Section + STRIDE: 50 + PATCH_SIZE: 100 + AUGMENTATIONS: + RESIZE: + HEIGHT: 200 + WIDTH: 200 + PAD: + HEIGHT: 256 + WIDTH: 256 + MEAN: 0.0009997 # 0.0009996710808862074 + STD: 0.20977 # 0.20976548783479299 + MODEL_DIR: "models" + +TEST: + MODEL_PATH: "" + TEST_STRIDE: 10 + SPLIT: 'Both' # Can be Both, Test1, Test2 + INLINE: True + CROSSLINE: True + POST_PROCESSING: + SIZE: 128 + CROP_PIXELS: 14 # Number of pixels to crop top, bottom, left and right + diff --git a/examples/interpretation/notebooks/logging.conf b/examples/interpretation/notebooks/logging.conf new file mode 100644 index 00000000..56334fc4 --- /dev/null +++ b/examples/interpretation/notebooks/logging.conf @@ -0,0 +1,34 @@ +[loggers] +keys=root,__main__,event_handlers + +[handlers] +keys=consoleHandler + +[formatters] +keys=simpleFormatter + +[logger_root] +level=INFO +handlers=consoleHandler + +[logger___main__] +level=INFO +handlers=consoleHandler +qualname=__main__ +propagate=0 + +[logger_event_handlers] +level=INFO +handlers=consoleHandler +qualname=event_handlers +propagate=0 + +[handler_consoleHandler] +class=StreamHandler +level=INFO +formatter=simpleFormatter +args=(sys.stdout,) + +[formatter_simpleFormatter] +format=%(asctime)s - %(name)s - %(levelname)s - %(message)s + diff --git a/examples/interpretation/notebooks/utilities.py b/examples/interpretation/notebooks/utilities.py new file mode 100644 index 00000000..0ef72f02 --- /dev/null +++ b/examples/interpretation/notebooks/utilities.py @@ -0,0 +1,242 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
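+"""Shared helpers for the interpretation example notebooks: a running
+confusion-matrix scorer (runningScore), patch-based section inference
+utilities, and simple plotting functions."""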
+ +import itertools + +import matplotlib.pyplot as plt +import numpy as np +import torch +import torch.nn.functional as F +from ignite.utils import convert_tensor +from scipy.ndimage import zoom +from toolz import compose, curry, itertoolz, pipe + + +class runningScore(object): + def __init__(self, n_classes): + self.n_classes = n_classes + self.confusion_matrix = np.zeros((n_classes, n_classes)) + + def _fast_hist(self, label_true, label_pred, n_class): + mask = (label_true >= 0) & (label_true < n_class) + hist = np.bincount(n_class * label_true[mask].astype(int) + label_pred[mask], minlength=n_class ** 2,).reshape( + n_class, n_class + ) + return hist + + def update(self, label_trues, label_preds): + for lt, lp in zip(label_trues, label_preds): + self.confusion_matrix += self._fast_hist(lt.flatten(), lp.flatten(), self.n_classes) + + def get_scores(self): + """Returns accuracy score evaluation result. + - overall accuracy + - mean accuracy + - mean IU + - fwavacc + """ + hist = self.confusion_matrix + acc = np.diag(hist).sum() / hist.sum() + acc_cls = np.diag(hist) / hist.sum(axis=1) + mean_acc_cls = np.nanmean(acc_cls) + iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)) + mean_iu = np.nanmean(iu) + freq = hist.sum(axis=1) / hist.sum() # fraction of the pixels that come from each class + fwavacc = (freq[freq > 0] * iu[freq > 0]).sum() + cls_iu = dict(zip(range(self.n_classes), iu)) + + return ( + { + "Pixel Acc: ": acc, + "Class Accuracy: ": acc_cls, + "Mean Class Acc: ": mean_acc_cls, + "Freq Weighted IoU: ": fwavacc, + "Mean IoU: ": mean_iu, + "confusion_matrix": self.confusion_matrix, + }, + cls_iu, + ) + + def reset(self): + self.confusion_matrix = np.zeros((self.n_classes, self.n_classes)) + + +def prepare_batch(batch, device=None, non_blocking=False): + x, y = batch + return ( + convert_tensor(x, device=device, non_blocking=non_blocking), + convert_tensor(y, device=device, non_blocking=non_blocking), + ) + + +def _transform_CHW_to_HWC(numpy_array): + return np.moveaxis(numpy_array, 0, -1) + + +def _transform_HWC_to_CHW(numpy_array): + return np.moveaxis(numpy_array, -1, 0) + + +@curry +def _apply_augmentation3D(aug, numpy_array): + assert len(numpy_array.shape) == 3, "This method only accepts 3D arrays" + patch = _transform_CHW_to_HWC(numpy_array) + patch = aug(image=patch)["image"] + return _transform_HWC_to_CHW(patch) + + +@curry +def _apply_augmentation2D(aug, numpy_array): + assert len(numpy_array.shape) == 2, "This method only accepts 2D arrays" + return aug(image=numpy_array)["image"] + + +_AUGMENTATION = {3: _apply_augmentation3D, 2: _apply_augmentation2D} + + +@curry +def _apply_augmentation(aug, image): + if isinstance(image, torch.Tensor): + image = image.numpy() + + if aug is not None: + return _AUGMENTATION[len(image.shape)](aug, image) + else: + return image + + +def _add_depth(image): + if isinstance(image, torch.Tensor): + image = image.numpy() + return add_patch_depth_channels(image) + + +def _to_torch(image): + if isinstance(image, torch.Tensor): + return image + else: + return torch.from_numpy(image).to(torch.float32) + + +def _expand_dims_if_necessary(torch_tensor): + if len(torch_tensor.shape) == 2: + return torch_tensor.unsqueeze(dim=0) + else: + return torch_tensor + + +@curry +def _extract_patch(hdx, wdx, ps, patch_size, img_p): + if len(img_p.shape) == 2: # 2D + return img_p[hdx + ps : hdx + ps + patch_size, wdx + ps : wdx + ps + patch_size] + else: # 3D + return img_p[:, hdx + ps : hdx + ps + patch_size, wdx + ps : wdx + ps + 
patch_size] + + +def compose_processing_pipeline(depth, aug=None): + steps = [] + if aug is not None: + steps.append(_apply_augmentation(aug)) + + if depth == "patch": + steps.append(_add_depth) + + steps.append(_to_torch) + steps.append(_expand_dims_if_necessary) + steps.reverse() + return compose(*steps) + + +def _generate_batches(h, w, ps, patch_size, stride, batch_size=64): + hdc_wdx_generator = itertools.product(range(0, h - patch_size + ps, stride), range(0, w - patch_size + ps, stride)) + + for batch_indexes in itertoolz.partition_all(batch_size, hdc_wdx_generator): + yield batch_indexes + + +@curry +def output_processing_pipeline(config, output): + output = output.unsqueeze(0) + _, _, h, w = output.shape + if config.TEST.POST_PROCESSING.SIZE != h or config.TEST.POST_PROCESSING.SIZE != w: + output = F.interpolate( + output, size=(config.TEST.POST_PROCESSING.SIZE, config.TEST.POST_PROCESSING.SIZE), mode="bilinear", + ) + + if config.TEST.POST_PROCESSING.CROP_PIXELS > 0: + _, _, h, w = output.shape + output = output[ + :, + :, + config.TEST.POST_PROCESSING.CROP_PIXELS : h - config.TEST.POST_PROCESSING.CROP_PIXELS, + config.TEST.POST_PROCESSING.CROP_PIXELS : w - config.TEST.POST_PROCESSING.CROP_PIXELS, + ] + return output.squeeze() + + +def patch_label_2d( + model, img, pre_processing, output_processing, patch_size, stride, batch_size, device, num_classes, +): + """Processes a whole section""" + img = torch.squeeze(img) + h, w = img.shape[-2], img.shape[-1] # height and width + + # Pad image with patch_size/2: + ps = int(np.floor(patch_size / 2)) # pad size + img_p = F.pad(img, pad=(ps, ps, ps, ps), mode="constant", value=0) + output_p = torch.zeros([1, num_classes, h + 2 * ps, w + 2 * ps]) + + # generate output: + for batch_indexes in _generate_batches(h, w, ps, patch_size, stride, batch_size=batch_size): + batch = torch.stack( + [pipe(img_p, _extract_patch(hdx, wdx, ps, patch_size), pre_processing) for hdx, wdx in batch_indexes], + dim=0, + ) + + model_output = model(batch.to(device)) + for (hdx, wdx), output in zip(batch_indexes, model_output.detach().cpu()): + output = output_processing(output) + output_p[:, :, hdx + ps : hdx + ps + patch_size, wdx + ps : wdx + ps + patch_size] += output + + # crop the output_p in the middle + output = output_p[:, :, ps:-ps, ps:-ps] + return output + + +def write_section_file(labels, section_file, config): + # define indices of the array + irange, xrange, depth = labels.shape + + if config.TEST.INLINE: + i_list = list(range(irange)) + i_list = ["i_" + str(inline) for inline in i_list] + else: + i_list = [] + + if config.TEST.CROSSLINE: + x_list = list(range(xrange)) + x_list = ["x_" + str(crossline) for crossline in x_list] + else: + x_list = [] + + list_test = i_list + x_list + + file_object = open(section_file, "w") + file_object.write("\n".join(list_test)) + file_object.close() + + +def plot_aline(aline, labels, xlabel, ylabel="depth"): + """Plot a section of the data.""" + plt.figure(figsize=(18, 6)) + # data + plt.subplot(1, 2, 1) + plt.imshow(aline) + plt.title("Data") + plt.xlabel(xlabel) + plt.ylabel(ylabel) + # mask + plt.subplot(1, 2, 2) + plt.imshow(labels) + plt.xlabel(xlabel) + plt.title("Label") diff --git a/experiments/interpretation/dutchf3_patch/README.md b/experiments/interpretation/dutchf3_patch/README.md new file mode 100644 index 00000000..8fbd2d60 --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/README.md @@ -0,0 +1,29 @@ +## F3 Netherlands Patch Experiments +In this folder are training and testing scripts that 
work on the F3 Netherlands dataset.
+You can run five different models on this dataset:
+* [HRNet](local/configs/hrnet.yaml)
+* [SEResNet](local/configs/seresnet_unet.yaml)
+* [UNet](local/configs/unet.yaml)
+* [PatchDeconvNet](local/configs/patch_deconvnet.yaml)
+* [PatchDeconvNet-Skip](local/configs/patch_deconvnet_skip.yaml)
+
+All these models take 2D patches of the dataset as input and produce predictions for those patches. The patches then need to be stitched together to form a whole inline or crossline (a minimal sketch of this stitching step is included at the end of this README).
+
+To understand the configuration files and the default parameters, refer to this [section in the top level README](../../../README.md#configuration-files).
+
+### Setup
+
+Please set up a conda environment following the instructions in the top-level [README.md](../../../README.md#setting-up-environment) file.
+Also follow the instructions for [downloading and preparing](../../../README.md#f3-Netherlands) the data.
+
+### Running experiments
+
+Now you're all set to run training and testing experiments on the F3 Netherlands dataset. Please start from the `train.sh` and `test.sh` scripts under the `local/` and `distributed/` directories, which invoke the corresponding Python scripts. Take a look at the project configurations (e.g. in `default.py`) for experiment options and modify them if necessary.
+
+### Monitoring progress with TensorBoard
+- From this directory, run `tensorboard --logdir='output'` (all runtime logging information is written to the `output` folder).
+- Open a web browser and go to either vmpublicip:6006 if running remotely or localhost:6006 if running locally.
+> **NOTE**: If running remotely, remember that the port must be open and accessible.
+
+More information on TensorBoard can be found [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard#launching_tensorboard).
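+To make the stitching concrete, below is a minimal, hypothetical sketch of accumulating overlapping per-patch class scores over a section and taking a per-pixel argmax. It is not the code this repo ships (see `_patch_label_2d` in `local/test.py` for the actual implementation), and the helper name and signature are illustrative only.
+
+```python
+import numpy as np
+
+
+def stitch_patch_scores(score_patches, corners, patch_size, section_shape, n_classes):
+    # Sum (n_classes, patch_size, patch_size) score patches placed at the given
+    # (row, col) top-left corners into a full-section score volume, then label
+    # each pixel with its highest-scoring class. Overlapping patches vote by
+    # simple score addition.
+    h, w = section_shape
+    scores = np.zeros((n_classes, h, w), dtype=np.float32)
+    for patch, (r, c) in zip(score_patches, corners):
+        scores[:, r : r + patch_size, c : c + patch_size] += patch
+    return scores.argmax(axis=0)  # (h, w) label map
+```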
diff --git a/experiments/interpretation/dutchf3_patch/distributed/configs/hrnet.yaml b/experiments/interpretation/dutchf3_patch/distributed/configs/hrnet.yaml new file mode 100644 index 00000000..04ad6479 --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/distributed/configs/hrnet.yaml @@ -0,0 +1,102 @@ +CUDNN: + BENCHMARK: true + DETERMINISTIC: false + ENABLED: true +GPUS: (0,) +OUTPUT_DIR: 'output' +LOG_DIR: 'log' +WORKERS: 4 +PRINT_FREQ: 10 +LOG_CONFIG: logging.conf +SEED: 2019 + + +DATASET: + NUM_CLASSES: 6 + ROOT: /mnt/dutchf3 + CLASS_WEIGHTS: [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + + +MODEL: + NAME: seg_hrnet + IN_CHANNELS: 3 + PRETRAINED: '/mnt/hrnet_pretrained/image_classification/hrnetv2_w48_imagenet_pretrained.pth' + EXTRA: + FINAL_CONV_KERNEL: 1 + STAGE2: + NUM_MODULES: 1 + NUM_BRANCHES: 2 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + FUSE_METHOD: SUM + STAGE3: + NUM_MODULES: 4 + NUM_BRANCHES: 3 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + - 192 + FUSE_METHOD: SUM + STAGE4: + NUM_MODULES: 3 + NUM_BRANCHES: 4 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + - 192 + - 384 + FUSE_METHOD: SUM + +TRAIN: + BATCH_SIZE_PER_GPU: 16 + BEGIN_EPOCH: 0 + END_EPOCH: 300 + MIN_LR: 0.001 + MAX_LR: 0.02 + MOMENTUM: 0.9 + WEIGHT_DECAY: 0.0001 + SNAPSHOTS: 5 + AUGMENTATION: True + DEPTH: "section" #"patch" # Options are No, Patch and Section + STRIDE: 50 + PATCH_SIZE: 100 + AUGMENTATIONS: + RESIZE: + HEIGHT: 200 + WIDTH: 200 + PAD: + HEIGHT: 256 + WIDTH: 256 + MEAN: 0.0009997 # 0.0009996710808862074 + STD: 0.20977 # 0.20976548783479299 + MODEL_DIR: "models" + + +VALIDATION: + BATCH_SIZE_PER_GPU: 32 + +TEST: + MODEL_PATH: "/data/home/mat/repos/DeepSeismic/interpretation/experiments/segmentation/dutchf3/local/output/mat/exp/ccb7206b41dc7411609705e49d9f4c2d74c6eb88/seg_hrnet/Aug30_141919/models/seg_hrnet_running_model_18.pth" + TEST_STRIDE: 10 + SPLIT: 'Both' # Can be Both, Test1, Test2 + INLINE: True + CROSSLINE: True + POST_PROCESSING: + SIZE: 128 # + CROP_PIXELS: 14 # Number of pixels to crop top, bottom, left and right diff --git a/experiments/interpretation/dutchf3_patch/distributed/configs/patch_deconvnet.yaml b/experiments/interpretation/dutchf3_patch/distributed/configs/patch_deconvnet.yaml new file mode 100644 index 00000000..eb89ff00 --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/distributed/configs/patch_deconvnet.yaml @@ -0,0 +1,59 @@ +CUDNN: + BENCHMARK: true + DETERMINISTIC: false + ENABLED: true +GPUS: (0,) +OUTPUT_DIR: 'output' +LOG_DIR: 'log' +WORKERS: 4 +PRINT_FREQ: 10 +LOG_CONFIG: logging.conf +SEED: 2019 + +DATASET: + NUM_CLASSES: 6 + ROOT: /mnt/dutchf3 + CLASS_WEIGHTS: [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + +MODEL: + NAME: patch_deconvnet_skip + IN_CHANNELS: 1 + + +TRAIN: + BATCH_SIZE_PER_GPU: 64 + BEGIN_EPOCH: 0 + END_EPOCH: 300 + MIN_LR: 0.001 + MAX_LR: 0.02 + MOMENTUM: 0.9 + WEIGHT_DECAY: 0.0001 + SNAPSHOTS: 5 + AUGMENTATION: True + DEPTH: "none" #"patch" # Options are None, Patch and Section + STRIDE: 50 + PATCH_SIZE: 99 + AUGMENTATIONS: + RESIZE: + HEIGHT: 99 + WIDTH: 99 + PAD: + HEIGHT: 99 + WIDTH: 99 + MEAN: 0.0009997 # 0.0009996710808862074 + STD: 0.20977 # 0.20976548783479299 + MODEL_DIR: "models" + +VALIDATION: + BATCH_SIZE_PER_GPU: 512 + +TEST: + MODEL_PATH: "" + TEST_STRIDE: 10 + SPLIT: 'Both' # Can be Both, Test1, Test2 + INLINE: True + CROSSLINE: True + POST_PROCESSING: + SIZE: 99 # + CROP_PIXELS: 0 # Number of pixels to 
crop top, bottom, left and right + diff --git a/experiments/interpretation/dutchf3_patch/distributed/configs/patch_deconvnet_skip.yaml b/experiments/interpretation/dutchf3_patch/distributed/configs/patch_deconvnet_skip.yaml new file mode 100644 index 00000000..eb89ff00 --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/distributed/configs/patch_deconvnet_skip.yaml @@ -0,0 +1,59 @@ +CUDNN: + BENCHMARK: true + DETERMINISTIC: false + ENABLED: true +GPUS: (0,) +OUTPUT_DIR: 'output' +LOG_DIR: 'log' +WORKERS: 4 +PRINT_FREQ: 10 +LOG_CONFIG: logging.conf +SEED: 2019 + +DATASET: + NUM_CLASSES: 6 + ROOT: /mnt/dutchf3 + CLASS_WEIGHTS: [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + +MODEL: + NAME: patch_deconvnet_skip + IN_CHANNELS: 1 + + +TRAIN: + BATCH_SIZE_PER_GPU: 64 + BEGIN_EPOCH: 0 + END_EPOCH: 300 + MIN_LR: 0.001 + MAX_LR: 0.02 + MOMENTUM: 0.9 + WEIGHT_DECAY: 0.0001 + SNAPSHOTS: 5 + AUGMENTATION: True + DEPTH: "none" #"patch" # Options are None, Patch and Section + STRIDE: 50 + PATCH_SIZE: 99 + AUGMENTATIONS: + RESIZE: + HEIGHT: 99 + WIDTH: 99 + PAD: + HEIGHT: 99 + WIDTH: 99 + MEAN: 0.0009997 # 0.0009996710808862074 + STD: 0.20977 # 0.20976548783479299 + MODEL_DIR: "models" + +VALIDATION: + BATCH_SIZE_PER_GPU: 512 + +TEST: + MODEL_PATH: "" + TEST_STRIDE: 10 + SPLIT: 'Both' # Can be Both, Test1, Test2 + INLINE: True + CROSSLINE: True + POST_PROCESSING: + SIZE: 99 # + CROP_PIXELS: 0 # Number of pixels to crop top, bottom, left and right + diff --git a/experiments/interpretation/dutchf3_patch/distributed/configs/seresnet_unet.yaml b/experiments/interpretation/dutchf3_patch/distributed/configs/seresnet_unet.yaml new file mode 100644 index 00000000..d0b8126f --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/distributed/configs/seresnet_unet.yaml @@ -0,0 +1,59 @@ +CUDNN: + BENCHMARK: true + DETERMINISTIC: false + ENABLED: true +GPUS: (0,) +OUTPUT_DIR: 'output' +LOG_DIR: 'log' +WORKERS: 4 +PRINT_FREQ: 10 +LOG_CONFIG: logging.conf +SEED: 2019 + + +DATASET: + NUM_CLASSES: 6 + ROOT: /mnt/dutchf3 + CLASS_WEIGHTS: [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + +MODEL: + NAME: resnet_unet + IN_CHANNELS: 3 + +TRAIN: + BATCH_SIZE_PER_GPU: 16 + BEGIN_EPOCH: 0 + END_EPOCH: 300 + MIN_LR: 0.001 + MAX_LR: 0.02 + MOMENTUM: 0.9 + WEIGHT_DECAY: 0.0001 + SNAPSHOTS: 5 + AUGMENTATION: True + DEPTH: "section" # Options are No, Patch and Section + STRIDE: 50 + PATCH_SIZE: 100 + AUGMENTATIONS: + RESIZE: + HEIGHT: 200 + WIDTH: 200 + PAD: + HEIGHT: 256 + WIDTH: 256 + MEAN: 0.0009997 # 0.0009996710808862074 + STD: 0.20977 # 0.20976548783479299 + MODEL_DIR: "models" + + +VALIDATION: + BATCH_SIZE_PER_GPU: 32 + +TEST: + MODEL_PATH: "/data/home/mat/repos/DeepSeismic/interpretation/experiments/segmentation/dutchf3/local/output/mat/exp/dc2e2d20b7f6d508beb779ffff37c77d0139e588/resnet_unet/Sep01_125513/models/resnet_unet_snapshot1model_52.pth" + TEST_STRIDE: 10 + SPLIT: 'Both' # Can be Both, Test1, Test2 + INLINE: True + CROSSLINE: True + POST_PROCESSING: + SIZE: 128 + CROP_PIXELS: 14 # Number of pixels to crop top, bottom, left and right \ No newline at end of file diff --git a/experiments/interpretation/dutchf3_patch/distributed/configs/unet.yaml b/experiments/interpretation/dutchf3_patch/distributed/configs/unet.yaml new file mode 100644 index 00000000..2843e62c --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/distributed/configs/unet.yaml @@ -0,0 +1,63 @@ +# UNet configuration + +CUDNN: + BENCHMARK: true + DETERMINISTIC: false + ENABLED: true +GPUS: (0,) +OUTPUT_DIR: 'output' +LOG_DIR: 
'log' +WORKERS: 4 +PRINT_FREQ: 10 +LOG_CONFIG: logging.conf +SEED: 2019 + + +DATASET: + NUM_CLASSES: 6 + ROOT: /mnt/dutchf3 + CLASS_WEIGHTS: [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + +MODEL: + NAME: resnet_unet + IN_CHANNELS: 3 + + +TRAIN: + BATCH_SIZE_PER_GPU: 16 + BEGIN_EPOCH: 0 + END_EPOCH: 300 + MIN_LR: 0.001 + MAX_LR: 0.02 + MOMENTUM: 0.9 + WEIGHT_DECAY: 0.0001 + SNAPSHOTS: 5 + AUGMENTATION: True + DEPTH: "section" # Options are No, Patch and Section + STRIDE: 50 + PATCH_SIZE: 100 + AUGMENTATIONS: + RESIZE: + HEIGHT: 200 + WIDTH: 200 + PAD: + HEIGHT: 256 + WIDTH: 256 + MEAN: 0.0009997 # 0.0009996710808862074 + STD: 0.20977 # 0.20976548783479299 + MODEL_DIR: "models" + + +VALIDATION: + BATCH_SIZE_PER_GPU: 32 + +TEST: + MODEL_PATH: "" + TEST_STRIDE: 10 + SPLIT: 'Both' # Can be Both, Test1, Test2 + INLINE: True + CROSSLINE: True + POST_PROCESSING: + SIZE: 128 + CROP_PIXELS: 14 # Number of pixels to crop top, bottom, left and right + diff --git a/experiments/interpretation/dutchf3_patch/distributed/default.py b/experiments/interpretation/dutchf3_patch/distributed/default.py new file mode 100644 index 00000000..bf23527b --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/distributed/default.py @@ -0,0 +1,106 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from yacs.config import CfgNode as CN + +_C = CN() + +_C.OUTPUT_DIR = "output" +_C.LOG_DIR = "log" +_C.GPUS = (0,) +_C.WORKERS = 4 +_C.PRINT_FREQ = 20 +_C.AUTO_RESUME = False +_C.PIN_MEMORY = True +_C.LOG_CONFIG = "logging.conf" +_C.SEED = 42 + +# Cudnn related params +_C.CUDNN = CN() +_C.CUDNN.BENCHMARK = True +_C.CUDNN.DETERMINISTIC = False +_C.CUDNN.ENABLED = True + + +# DATASET related params +_C.DATASET = CN() +_C.DATASET.ROOT = "" +_C.DATASET.NUM_CLASSES = 6 +_C.DATASET.CLASS_WEIGHTS = [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + +# common params for NETWORK +_C.MODEL = CN() +_C.MODEL.NAME = "patch_deconvnet" +_C.MODEL.IN_CHANNELS = 1 +_C.MODEL.PRETRAINED = "" +_C.MODEL.EXTRA = CN(new_allowed=True) + + +# training +_C.TRAIN = CN() +_C.TRAIN.MIN_LR = 0.001 +_C.TRAIN.MAX_LR = 0.01 +_C.TRAIN.MOMENTUM = 0.9 +_C.TRAIN.BEGIN_EPOCH = 0 +_C.TRAIN.END_EPOCH = 484 +_C.TRAIN.BATCH_SIZE_PER_GPU = 32 +_C.TRAIN.WEIGHT_DECAY = 0.0001 +_C.TRAIN.SNAPSHOTS = 5 +_C.TRAIN.MODEL_DIR = "models" +_C.TRAIN.AUGMENTATION = True +_C.TRAIN.STRIDE = 50 +_C.TRAIN.PATCH_SIZE = 99 +_C.TRAIN.MEAN = 0.0009997 # 0.0009996710808862074 +_C.TRAIN.STD = 0.21 # 0.20976548783479299 +_C.TRAIN.DEPTH = "None" # Options are None, Patch and Section +# None adds no depth information and the num of channels remains at 1 +# Patch adds depth per patch so is simply the height of that patch from 0 to 1, channels=3 +# Section adds depth per section so contains depth information for the whole section, channels=3 +_C.TRAIN.AUGMENTATIONS = CN() +_C.TRAIN.AUGMENTATIONS.RESIZE = CN() +_C.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT = 200 +_C.TRAIN.AUGMENTATIONS.RESIZE.WIDTH = 200 +_C.TRAIN.AUGMENTATIONS.PAD = CN() +_C.TRAIN.AUGMENTATIONS.PAD.HEIGHT = 256 +_C.TRAIN.AUGMENTATIONS.PAD.WIDTH = 256 + + +# validation +_C.VALIDATION = CN() +_C.VALIDATION.BATCH_SIZE_PER_GPU = 32 + +# TEST +_C.TEST = CN() +_C.TEST.MODEL_PATH = "" +_C.TEST.TEST_STRIDE = 10 +_C.TEST.SPLIT = "Both" # Can be Both, 
Test1, Test2 +_C.TEST.INLINE = True +_C.TEST.CROSSLINE = True +_C.TEST.POST_PROCESSING = CN() # Model output postprocessing +_C.TEST.POST_PROCESSING.SIZE = 128 # Size to interpolate to in pixels +_C.TEST.POST_PROCESSING.CROP_PIXELS = 14 # Number of pixels to crop top, bottom, left and right + + +def update_config(cfg, options=None, config_file=None): + cfg.defrost() + + if config_file: + cfg.merge_from_file(config_file) + + if options: + cfg.merge_from_list(options) + + cfg.freeze() + + +if __name__ == "__main__": + import sys + + with open(sys.argv[1], "w") as f: + print(_C, file=f) diff --git a/experiments/interpretation/dutchf3_patch/distributed/logging.conf b/experiments/interpretation/dutchf3_patch/distributed/logging.conf new file mode 100644 index 00000000..56334fc4 --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/distributed/logging.conf @@ -0,0 +1,34 @@ +[loggers] +keys=root,__main__,event_handlers + +[handlers] +keys=consoleHandler + +[formatters] +keys=simpleFormatter + +[logger_root] +level=INFO +handlers=consoleHandler + +[logger___main__] +level=INFO +handlers=consoleHandler +qualname=__main__ +propagate=0 + +[logger_event_handlers] +level=INFO +handlers=consoleHandler +qualname=event_handlers +propagate=0 + +[handler_consoleHandler] +class=StreamHandler +level=INFO +formatter=simpleFormatter +args=(sys.stdout,) + +[formatter_simpleFormatter] +format=%(asctime)s - %(name)s - %(levelname)s - %(message)s + diff --git a/experiments/interpretation/dutchf3_patch/distributed/run.sh b/experiments/interpretation/dutchf3_patch/distributed/run.sh new file mode 100644 index 00000000..e69de29b diff --git a/experiments/interpretation/dutchf3_patch/distributed/train.py b/experiments/interpretation/dutchf3_patch/distributed/train.py new file mode 100644 index 00000000..3f19c106 --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/distributed/train.py @@ -0,0 +1,344 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+#
+# To Run on 2 GPUs
+# python -m torch.distributed.launch --nproc_per_node=2 train.py --cfg "configs/hrnet.yaml"
+#
+# To Test:
+# python -m torch.distributed.launch --nproc_per_node=2 train.py TRAIN.END_EPOCH 1 TRAIN.SNAPSHOTS 1 --cfg "configs/hrnet.yaml" --debug
+#
+# /* spell-checker: disable */
+"""Train models on the Dutch F3 dataset.
+
+Trains models using PyTorch DistributedDataParallel.
+Uses a warmup schedule that then transitions into a cyclic learning rate.
+
+Approximate time to run on two V100s for 300 epochs: 2.5 days
+"""
+
+import logging
+import logging.config
+import os
+from os import path
+
+import cv2
+import fire
+import numpy as np
+import torch
+from albumentations import Compose, HorizontalFlip, Normalize, Resize, PadIfNeeded
+from cv_lib.utils import load_log_configuration
+from cv_lib.event_handlers import (
+    SnapshotHandler,
+    logging_handlers,
+    tensorboard_handlers,
+)
+from cv_lib.event_handlers.logging_handlers import Evaluator
+from cv_lib.event_handlers.tensorboard_handlers import (
+    create_image_writer,
+    create_summary_writer,
+)
+from cv_lib.segmentation import models
+from cv_lib.segmentation import extract_metric_from
+from deepseismic_interpretation.dutchf3.data import get_patch_loader, decode_segmap
+from cv_lib.segmentation.dutchf3.engine import (
+    create_supervised_evaluator,
+    create_supervised_trainer,
+)
+
+from ignite.metrics import Loss
+from cv_lib.segmentation.metrics import (
+    pixelwise_accuracy,
+    class_accuracy,
+    mean_class_accuracy,
+    class_iou,
+    mean_iou,
+)
+
+from cv_lib.segmentation.dutchf3.utils import (
+    current_datetime,
+    generate_path,
+    git_branch,
+    git_hash,
+    np_to_tb,
+)
+from default import _C as config
+from default import update_config
+from ignite.contrib.handlers import (
+    ConcatScheduler,
+    CosineAnnealingScheduler,
+    LinearCyclicalScheduler,
+)
+from ignite.engine import Events
+from ignite.utils import convert_tensor
+from toolz import compose, curry, take
+from torch.utils import data
+
+
+def prepare_batch(batch, device=None, non_blocking=False):
+    x, y = batch
+    return (
+        convert_tensor(x, device=device, non_blocking=non_blocking),
+        convert_tensor(y, device=device, non_blocking=non_blocking),
+    )
+
+
+@curry
+def update_sampler_epoch(data_loader, engine):
+    data_loader.sampler.epoch = engine.state.epoch
+
+
+def run(*options, cfg=None, local_rank=0, debug=False):
+    """Run training and validation of model.
+
+    Notes:
+        Options can be passed in via the options argument and loaded from the cfg file.
+        Options from default.py will be overridden by options loaded from the cfg file.
+        Options passed in via the options argument will override options loaded from the cfg file.
+
+    Args:
+        *options (str, int, optional): Options used to override what is loaded from the
+                                       config. To see what options are available consult
+                                       default.py
+        cfg (str, optional): Location of config file to load. Defaults to None.
+        local_rank (int, optional): Which GPU this process runs on; supplied per process
+                                    by torch.distributed.launch. Defaults to 0.
+        debug (bool, optional): If True, run training and validation on only a few
+                                batches. Defaults to False.
+    """
+    update_config(config, options=options, config_file=cfg)
+
+    # Start logging
+    load_log_configuration(config.LOG_CONFIG)
+    logger = logging.getLogger(__name__)
+    logger.debug(config.WORKERS)
+    silence_other_ranks = True
+    world_size = int(os.environ.get("WORLD_SIZE", 1))
+    distributed = world_size > 1
+
+    if distributed:
+        # FOR DISTRIBUTED: Set the device according to local_rank.
+        torch.cuda.set_device(local_rank)
+
+        # FOR DISTRIBUTED: Initialize the backend. torch.distributed.launch will
+        # provide environment variables, and requires that you use init_method=`env://`.
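+        # (torch.distributed.launch sets MASTER_ADDR, MASTER_PORT, RANK and
+        # WORLD_SIZE in the environment of each worker process; init_method="env://"
+        # reads the connection details from there.)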
+ torch.distributed.init_process_group(backend="nccl", init_method="env://") + + scheduler_step = config.TRAIN.END_EPOCH // config.TRAIN.SNAPSHOTS + torch.backends.cudnn.benchmark = config.CUDNN.BENCHMARK + + torch.manual_seed(config.SEED) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(config.SEED) + np.random.seed(seed=config.SEED) + # Setup Augmentations + basic_aug = Compose( + [ + Normalize(mean=(config.TRAIN.MEAN,), std=(config.TRAIN.STD,), max_pixel_value=1), + Resize( + config.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT, config.TRAIN.AUGMENTATIONS.RESIZE.WIDTH, always_apply=True, + ), + PadIfNeeded( + min_height=config.TRAIN.AUGMENTATIONS.PAD.HEIGHT, + min_width=config.TRAIN.AUGMENTATIONS.PAD.WIDTH, + border_mode=cv2.BORDER_CONSTANT, + always_apply=True, + mask_value=255, + ), + ] + ) + if config.TRAIN.AUGMENTATION: + train_aug = Compose([basic_aug, HorizontalFlip(p=0.5)]) + val_aug = basic_aug + else: + train_aug = val_aug = basic_aug + + TrainPatchLoader = get_patch_loader(config) + + train_set = TrainPatchLoader( + config.DATASET.ROOT, + split="train", + is_transform=True, + stride=config.TRAIN.STRIDE, + patch_size=config.TRAIN.PATCH_SIZE, + augmentations=train_aug, + ) + logger.info(f"Training examples {len(train_set)}") + + val_set = TrainPatchLoader( + config.DATASET.ROOT, + split="val", + is_transform=True, + stride=config.TRAIN.STRIDE, + patch_size=config.TRAIN.PATCH_SIZE, + augmentations=val_aug, + ) + logger.info(f"Validation examples {len(val_set)}") + n_classes = train_set.n_classes + + train_sampler = torch.utils.data.distributed.DistributedSampler(train_set, num_replicas=world_size, rank=local_rank) + + train_loader = data.DataLoader( + train_set, batch_size=config.TRAIN.BATCH_SIZE_PER_GPU, num_workers=config.WORKERS, sampler=train_sampler, + ) + + val_sampler = torch.utils.data.distributed.DistributedSampler(val_set, num_replicas=world_size, rank=local_rank) + + val_loader = data.DataLoader( + val_set, batch_size=config.VALIDATION.BATCH_SIZE_PER_GPU, num_workers=config.WORKERS, sampler=val_sampler, + ) + + model = getattr(models, config.MODEL.NAME).get_seg_model(config) + + device = "cpu" + if torch.cuda.is_available(): + device = "cuda" + model = model.to(device) # Send to GPU + + optimizer = torch.optim.SGD( + model.parameters(), + lr=config.TRAIN.MAX_LR, + momentum=config.TRAIN.MOMENTUM, + weight_decay=config.TRAIN.WEIGHT_DECAY, + ) + + # weights are inversely proportional to the frequency of the classes in + # the training set + class_weights = torch.tensor(config.DATASET.CLASS_WEIGHTS, device=device, requires_grad=False) + + criterion = torch.nn.CrossEntropyLoss(weight=class_weights, ignore_index=255, reduction="mean") + + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], find_unused_parameters=True) + + snapshot_duration = scheduler_step * len(train_loader) + warmup_duration = 5 * len(train_loader) + warmup_scheduler = LinearCyclicalScheduler( + optimizer, + "lr", + start_value=config.TRAIN.MAX_LR, + end_value=config.TRAIN.MAX_LR * world_size, + cycle_size=10 * len(train_loader), + ) + cosine_scheduler = CosineAnnealingScheduler( + optimizer, "lr", config.TRAIN.MAX_LR * world_size, config.TRAIN.MIN_LR * world_size, snapshot_duration, + ) + + scheduler = ConcatScheduler(schedulers=[warmup_scheduler, cosine_scheduler], durations=[warmup_duration]) + + trainer = create_supervised_trainer(model, optimizer, criterion, prepare_batch, device=device) + + trainer.add_event_handler(Events.ITERATION_STARTED, scheduler) + # Set to update the 
epoch parameter of our distributed data sampler so that we get
+    # different shuffles
+    trainer.add_event_handler(Events.EPOCH_STARTED, update_sampler_epoch(train_loader))
+
+    if silence_other_ranks and local_rank != 0:
+        logging.getLogger("ignite.engine.engine.Engine").setLevel(logging.WARNING)
+
+    def _select_pred_and_mask(model_out_dict):
+        return (model_out_dict["y_pred"].squeeze(), model_out_dict["mask"].squeeze())
+
+    evaluator = create_supervised_evaluator(
+        model,
+        prepare_batch,
+        metrics={
+            "nll": Loss(criterion, output_transform=_select_pred_and_mask, device=device),
+            "pixa": pixelwise_accuracy(n_classes, output_transform=_select_pred_and_mask, device=device),
+            "cacc": class_accuracy(n_classes, output_transform=_select_pred_and_mask, device=device),
+            "mca": mean_class_accuracy(n_classes, output_transform=_select_pred_and_mask, device=device),
+            "ciou": class_iou(n_classes, output_transform=_select_pred_and_mask, device=device),
+            "mIoU": mean_iou(n_classes, output_transform=_select_pred_and_mask, device=device),
+        },
+        device=device,
+    )
+
+    # Set the validation run to start on the epoch completion of the training run
+    if debug:
+        logger.info("Running Validation in Debug/Test mode")
+        val_loader = take(3, val_loader)
+    trainer.add_event_handler(Events.EPOCH_COMPLETED, Evaluator(evaluator, val_loader))
+
+    if local_rank == 0:  # Run only on master process
+
+        trainer.add_event_handler(
+            Events.ITERATION_COMPLETED, logging_handlers.log_training_output(log_interval=config.PRINT_FREQ),
+        )
+        trainer.add_event_handler(Events.EPOCH_STARTED, logging_handlers.log_lr(optimizer))
+
+        try:
+            output_dir = generate_path(
+                config.OUTPUT_DIR, git_branch(), git_hash(), config.MODEL.NAME, current_datetime(),
+            )
+        except TypeError:
+            output_dir = generate_path(config.OUTPUT_DIR, config.MODEL.NAME, current_datetime(),)
+
+        summary_writer = create_summary_writer(log_dir=path.join(output_dir, config.LOG_DIR))
+        logger.info(f"Logging Tensorboard to {path.join(output_dir, config.LOG_DIR)}")
+        trainer.add_event_handler(
+            Events.EPOCH_STARTED, tensorboard_handlers.log_lr(summary_writer, optimizer, "epoch"),
+        )
+        trainer.add_event_handler(
+            Events.ITERATION_COMPLETED, tensorboard_handlers.log_training_output(summary_writer),
+        )
+        evaluator.add_event_handler(
+            Events.EPOCH_COMPLETED,
+            logging_handlers.log_metrics(
+                "Validation results",
+                metrics_dict={
+                    "nll": "Avg loss :",
+                    "mIoU": " Avg IoU :",
+                    "pixa": "Pixelwise Accuracy :",
+                    "mca": "Mean Class Accuracy :",
+                },
+            ),
+        )
+        evaluator.add_event_handler(
+            Events.EPOCH_COMPLETED,
+            tensorboard_handlers.log_metrics(
+                summary_writer,
+                trainer,
+                "epoch",
+                metrics_dict={"mIoU": "Validation/IoU", "nll": "Validation/Loss", "mca": "Validation/MCA",},
+            ),
+        )
+
+        def _select_max(pred_tensor):
+            return pred_tensor.max(1)[1]
+
+        def _tensor_to_numpy(pred_tensor):
+            return pred_tensor.squeeze().cpu().numpy()
+
+        transform_func = compose(np_to_tb, decode_segmap(n_classes=n_classes), _tensor_to_numpy)
+
+        transform_pred = compose(transform_func, _select_max)
+
+        evaluator.add_event_handler(
+            Events.EPOCH_COMPLETED, create_image_writer(summary_writer, "Validation/Image", "image"),
+        )
+        evaluator.add_event_handler(
+            Events.EPOCH_COMPLETED,
+            create_image_writer(summary_writer, "Validation/Mask", "mask", transform_func=transform_func),
+        )
+        evaluator.add_event_handler(
+            Events.EPOCH_COMPLETED,
+            create_image_writer(summary_writer, "Validation/Pred", "y_pred", transform_func=transform_pred,),
+        )
+
+        def snapshot_function():
+            return
(trainer.state.iteration % snapshot_duration) == 0 + + checkpoint_handler = SnapshotHandler( + path.join(output_dir, config.TRAIN.MODEL_DIR), + config.MODEL.NAME, + extract_metric_from("mIoU"), + snapshot_function, + ) + evaluator.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {"model": model}) + + logger.info("Starting training") + + if debug: + logger.info("Running Training in Debug/Test mode") + train_loader = take(3, train_loader) + + trainer.run(train_loader, max_epochs=config.TRAIN.END_EPOCH) + + +if __name__ == "__main__": + fire.Fire(run) diff --git a/experiments/interpretation/dutchf3_patch/distributed/train.sh b/experiments/interpretation/dutchf3_patch/distributed/train.sh new file mode 100755 index 00000000..e9394ecd --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/distributed/train.sh @@ -0,0 +1,3 @@ +#!/bin/bash +export PYTHONPATH=/data/home/mat/repos/DeepSeismic/interpretation:$PYTHONPATH +python -m torch.distributed.launch --nproc_per_node=8 train.py --cfg configs/hrnet.yaml \ No newline at end of file diff --git a/experiments/interpretation/dutchf3_patch/local/configs/hrnet.yaml b/experiments/interpretation/dutchf3_patch/local/configs/hrnet.yaml new file mode 100644 index 00000000..c1964c98 --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/local/configs/hrnet.yaml @@ -0,0 +1,102 @@ +CUDNN: + BENCHMARK: true + DETERMINISTIC: false + ENABLED: true +GPUS: (0,) +OUTPUT_DIR: 'output' +LOG_DIR: 'log' +WORKERS: 4 +PRINT_FREQ: 10 +LOG_CONFIG: logging.conf +SEED: 2019 + + +DATASET: + NUM_CLASSES: 6 + ROOT: /mnt/dutchf3 + CLASS_WEIGHTS: [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + + +MODEL: + NAME: seg_hrnet + IN_CHANNELS: 3 + PRETRAINED: '/mnt/hrnet_pretrained/image_classification/hrnetv2_w48_imagenet_pretrained.pth' + EXTRA: + FINAL_CONV_KERNEL: 1 + STAGE2: + NUM_MODULES: 1 + NUM_BRANCHES: 2 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + FUSE_METHOD: SUM + STAGE3: + NUM_MODULES: 4 + NUM_BRANCHES: 3 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + - 192 + FUSE_METHOD: SUM + STAGE4: + NUM_MODULES: 3 + NUM_BRANCHES: 4 + BLOCK: BASIC + NUM_BLOCKS: + - 4 + - 4 + - 4 + - 4 + NUM_CHANNELS: + - 48 + - 96 + - 192 + - 384 + FUSE_METHOD: SUM + +TRAIN: + BATCH_SIZE_PER_GPU: 16 + BEGIN_EPOCH: 0 + END_EPOCH: 300 + MIN_LR: 0.001 + MAX_LR: 0.02 + MOMENTUM: 0.9 + WEIGHT_DECAY: 0.0001 + SNAPSHOTS: 5 + AUGMENTATION: True + DEPTH: "section" #"patch" # Options are No, Patch and Section + STRIDE: 50 + PATCH_SIZE: 100 + AUGMENTATIONS: + RESIZE: + HEIGHT: 200 + WIDTH: 200 + PAD: + HEIGHT: 256 + WIDTH: 256 + MEAN: 0.0009997 # 0.0009996710808862074 + STD: 0.20977 # 0.20976548783479299 + MODEL_DIR: "models" + + +VALIDATION: + BATCH_SIZE_PER_GPU: 128 + +TEST: + MODEL_PATH: "/data/home/mat/repos/DeepSeismic/experiments/segmentation/dutchf3/local/output/mat/exp/237c16780794800631c3f1895cacc475e15aca99/seg_hrnet/Sep17_115731/models/seg_hrnet_running_model_33.pth" + TEST_STRIDE: 10 + SPLIT: 'Both' # Can be Both, Test1, Test2 + INLINE: True + CROSSLINE: True + POST_PROCESSING: + SIZE: 128 # + CROP_PIXELS: 14 # Number of pixels to crop top, bottom, left and right diff --git a/experiments/interpretation/dutchf3_patch/local/configs/patch_deconvnet.yaml b/experiments/interpretation/dutchf3_patch/local/configs/patch_deconvnet.yaml new file mode 100644 index 00000000..f58406fb --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/local/configs/patch_deconvnet.yaml @@ -0,0 +1,59 @@ +CUDNN: + BENCHMARK: true + 
DETERMINISTIC: false + ENABLED: true +GPUS: (0,) +OUTPUT_DIR: 'output' +LOG_DIR: 'log' +WORKERS: 4 +PRINT_FREQ: 10 +LOG_CONFIG: logging.conf +SEED: 2019 + + +DATASET: + NUM_CLASSES: 6 + ROOT: /mnt/dutchf3 + CLASS_WEIGHTS: [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + +MODEL: + NAME: patch_deconvnet + IN_CHANNELS: 1 + + +TRAIN: + BATCH_SIZE_PER_GPU: 64 + BEGIN_EPOCH: 0 + END_EPOCH: 300 + MIN_LR: 0.001 + MAX_LR: 0.02 + MOMENTUM: 0.9 + WEIGHT_DECAY: 0.0001 + SNAPSHOTS: 5 + AUGMENTATION: True + DEPTH: "none" # Options are None, Patch and Section + STRIDE: 50 + PATCH_SIZE: 99 + AUGMENTATIONS: + RESIZE: + HEIGHT: 99 + WIDTH: 99 + PAD: + HEIGHT: 99 + WIDTH: 99 + MEAN: 0.0009997 # 0.0009996710808862074 + STD: 0.20977 # 0.20976548783479299 + MODEL_DIR: "models" + +VALIDATION: + BATCH_SIZE_PER_GPU: 512 + +TEST: + MODEL_PATH: "/data/home/mat/repos/DeepSeismic/interpretation/experiments/segmentation/dutchf3/local/output/mat/exp/5cc37bbe5302e1989ef1388d629400a16f82d1a9/patch_deconvnet/Aug27_200339/models/patch_deconvnet_snapshot1model_50.pth" + TEST_STRIDE: 10 + SPLIT: 'Both' # Can be Both, Test1, Test2 + INLINE: True + CROSSLINE: True + POST_PROCESSING: + SIZE: 99 + CROP_PIXELS: 0 # Number of pixels to crop top, bottom, left and right diff --git a/experiments/interpretation/dutchf3_patch/local/configs/patch_deconvnet_skip.yaml b/experiments/interpretation/dutchf3_patch/local/configs/patch_deconvnet_skip.yaml new file mode 100644 index 00000000..eb89ff00 --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/local/configs/patch_deconvnet_skip.yaml @@ -0,0 +1,59 @@ +CUDNN: + BENCHMARK: true + DETERMINISTIC: false + ENABLED: true +GPUS: (0,) +OUTPUT_DIR: 'output' +LOG_DIR: 'log' +WORKERS: 4 +PRINT_FREQ: 10 +LOG_CONFIG: logging.conf +SEED: 2019 + +DATASET: + NUM_CLASSES: 6 + ROOT: /mnt/dutchf3 + CLASS_WEIGHTS: [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + +MODEL: + NAME: patch_deconvnet_skip + IN_CHANNELS: 1 + + +TRAIN: + BATCH_SIZE_PER_GPU: 64 + BEGIN_EPOCH: 0 + END_EPOCH: 300 + MIN_LR: 0.001 + MAX_LR: 0.02 + MOMENTUM: 0.9 + WEIGHT_DECAY: 0.0001 + SNAPSHOTS: 5 + AUGMENTATION: True + DEPTH: "none" #"patch" # Options are None, Patch and Section + STRIDE: 50 + PATCH_SIZE: 99 + AUGMENTATIONS: + RESIZE: + HEIGHT: 99 + WIDTH: 99 + PAD: + HEIGHT: 99 + WIDTH: 99 + MEAN: 0.0009997 # 0.0009996710808862074 + STD: 0.20977 # 0.20976548783479299 + MODEL_DIR: "models" + +VALIDATION: + BATCH_SIZE_PER_GPU: 512 + +TEST: + MODEL_PATH: "" + TEST_STRIDE: 10 + SPLIT: 'Both' # Can be Both, Test1, Test2 + INLINE: True + CROSSLINE: True + POST_PROCESSING: + SIZE: 99 # + CROP_PIXELS: 0 # Number of pixels to crop top, bottom, left and right + diff --git a/experiments/interpretation/dutchf3_patch/local/configs/seresnet_unet.yaml b/experiments/interpretation/dutchf3_patch/local/configs/seresnet_unet.yaml new file mode 100644 index 00000000..d0b8126f --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/local/configs/seresnet_unet.yaml @@ -0,0 +1,59 @@ +CUDNN: + BENCHMARK: true + DETERMINISTIC: false + ENABLED: true +GPUS: (0,) +OUTPUT_DIR: 'output' +LOG_DIR: 'log' +WORKERS: 4 +PRINT_FREQ: 10 +LOG_CONFIG: logging.conf +SEED: 2019 + + +DATASET: + NUM_CLASSES: 6 + ROOT: /mnt/dutchf3 + CLASS_WEIGHTS: [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + +MODEL: + NAME: resnet_unet + IN_CHANNELS: 3 + +TRAIN: + BATCH_SIZE_PER_GPU: 16 + BEGIN_EPOCH: 0 + END_EPOCH: 300 + MIN_LR: 0.001 + MAX_LR: 0.02 + MOMENTUM: 0.9 + WEIGHT_DECAY: 0.0001 + SNAPSHOTS: 5 + AUGMENTATION: True + DEPTH: "section" # Options are No, Patch and 
Section + STRIDE: 50 + PATCH_SIZE: 100 + AUGMENTATIONS: + RESIZE: + HEIGHT: 200 + WIDTH: 200 + PAD: + HEIGHT: 256 + WIDTH: 256 + MEAN: 0.0009997 # 0.0009996710808862074 + STD: 0.20977 # 0.20976548783479299 + MODEL_DIR: "models" + + +VALIDATION: + BATCH_SIZE_PER_GPU: 32 + +TEST: + MODEL_PATH: "/data/home/mat/repos/DeepSeismic/interpretation/experiments/segmentation/dutchf3/local/output/mat/exp/dc2e2d20b7f6d508beb779ffff37c77d0139e588/resnet_unet/Sep01_125513/models/resnet_unet_snapshot1model_52.pth" + TEST_STRIDE: 10 + SPLIT: 'Both' # Can be Both, Test1, Test2 + INLINE: True + CROSSLINE: True + POST_PROCESSING: + SIZE: 128 + CROP_PIXELS: 14 # Number of pixels to crop top, bottom, left and right \ No newline at end of file diff --git a/experiments/interpretation/dutchf3_patch/local/configs/unet.yaml b/experiments/interpretation/dutchf3_patch/local/configs/unet.yaml new file mode 100644 index 00000000..c31157bf --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/local/configs/unet.yaml @@ -0,0 +1,63 @@ +# UNet configuration + +CUDNN: + BENCHMARK: true + DETERMINISTIC: false + ENABLED: true +GPUS: (0,) +OUTPUT_DIR: 'output' +LOG_DIR: 'log' +WORKERS: 4 +PRINT_FREQ: 10 +LOG_CONFIG: logging.conf +SEED: 2019 + + +DATASET: + NUM_CLASSES: 6 + ROOT: '/mnt/dutchf3' + CLASS_WEIGHTS: [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + +MODEL: + NAME: resnet_unet + IN_CHANNELS: 3 + + +TRAIN: + BATCH_SIZE_PER_GPU: 16 + BEGIN_EPOCH: 0 + END_EPOCH: 300 + MIN_LR: 0.001 + MAX_LR: 0.02 + MOMENTUM: 0.9 + WEIGHT_DECAY: 0.0001 + SNAPSHOTS: 5 + AUGMENTATION: True + DEPTH: "section" # Options are No, Patch and Section + STRIDE: 50 + PATCH_SIZE: 100 + AUGMENTATIONS: + RESIZE: + HEIGHT: 200 + WIDTH: 200 + PAD: + HEIGHT: 256 + WIDTH: 256 + MEAN: 0.0009997 # 0.0009996710808862074 + STD: 0.20977 # 0.20976548783479299 + MODEL_DIR: "models" + + +VALIDATION: + BATCH_SIZE_PER_GPU: 32 + +TEST: + MODEL_PATH: "" + TEST_STRIDE: 10 + SPLIT: 'Both' # Can be Both, Test1, Test2 + INLINE: True + CROSSLINE: True + POST_PROCESSING: + SIZE: 128 + CROP_PIXELS: 14 # Number of pixels to crop top, bottom, left and right + diff --git a/experiments/interpretation/dutchf3_patch/local/default.py b/experiments/interpretation/dutchf3_patch/local/default.py new file mode 100644 index 00000000..e34627a8 --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/local/default.py @@ -0,0 +1,105 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
+# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from yacs.config import CfgNode as CN + +_C = CN() + +_C.OUTPUT_DIR = "output" # This will be the base directory for all output, such as logs and saved models +_C.LOG_DIR = "" # This will be a subdirectory inside OUTPUT_DIR +_C.GPUS = (0,) +_C.WORKERS = 4 +_C.PRINT_FREQ = 20 +_C.AUTO_RESUME = False +_C.PIN_MEMORY = True +_C.LOG_CONFIG = "logging.conf" +_C.SEED = 42 + + +# Cudnn related params +_C.CUDNN = CN() +_C.CUDNN.BENCHMARK = True +_C.CUDNN.DETERMINISTIC = False +_C.CUDNN.ENABLED = True + +# DATASET related params +_C.DATASET = CN() +_C.DATASET.ROOT = "" +_C.DATASET.NUM_CLASSES = 6 +_C.DATASET.CLASS_WEIGHTS = [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + +# common params for NETWORK +_C.MODEL = CN() +_C.MODEL.NAME = "patch_deconvnet" +_C.MODEL.IN_CHANNELS = 1 +_C.MODEL.PRETRAINED = "" +_C.MODEL.EXTRA = CN(new_allowed=True) + + +# training +_C.TRAIN = CN() +_C.TRAIN.MIN_LR = 0.001 +_C.TRAIN.MAX_LR = 0.01 +_C.TRAIN.MOMENTUM = 0.9 +_C.TRAIN.BEGIN_EPOCH = 0 +_C.TRAIN.END_EPOCH = 484 +_C.TRAIN.BATCH_SIZE_PER_GPU = 32 +_C.TRAIN.WEIGHT_DECAY = 0.0001 +_C.TRAIN.SNAPSHOTS = 5 +_C.TRAIN.MODEL_DIR = "models" # This will be a subdirectory inside OUTPUT_DIR +_C.TRAIN.AUGMENTATION = True +_C.TRAIN.STRIDE = 50 +_C.TRAIN.PATCH_SIZE = 99 +_C.TRAIN.MEAN = 0.0009997 # 0.0009996710808862074 +_C.TRAIN.STD = 0.20977 # 0.20976548783479299 # TODO: Should we apply std scaling? +_C.TRAIN.DEPTH = "no" # Options are None, Patch and Section +# None adds no depth information and the num of channels remains at 1 +# Patch adds depth per patch so is simply the height of that patch from 0 to 1, channels=3 +# Section adds depth per section so contains depth information for the whole section, channels=3 +_C.TRAIN.AUGMENTATIONS = CN() +_C.TRAIN.AUGMENTATIONS.RESIZE = CN() +_C.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT = 200 +_C.TRAIN.AUGMENTATIONS.RESIZE.WIDTH = 200 +_C.TRAIN.AUGMENTATIONS.PAD = CN() +_C.TRAIN.AUGMENTATIONS.PAD.HEIGHT = 256 +_C.TRAIN.AUGMENTATIONS.PAD.WIDTH = 256 + +# validation +_C.VALIDATION = CN() +_C.VALIDATION.BATCH_SIZE_PER_GPU = 32 + +# TEST +_C.TEST = CN() +_C.TEST.MODEL_PATH = "" +_C.TEST.TEST_STRIDE = 10 +_C.TEST.SPLIT = "Both" # Can be Both, Test1, Test2 +_C.TEST.INLINE = True +_C.TEST.CROSSLINE = True +_C.TEST.POST_PROCESSING = CN() # Model output postprocessing +_C.TEST.POST_PROCESSING.SIZE = 128 # Size to interpolate to in pixels +_C.TEST.POST_PROCESSING.CROP_PIXELS = 14 # Number of pixels to crop top, bottom, left and right + + +def update_config(cfg, options=None, config_file=None): + cfg.defrost() + + if config_file: + cfg.merge_from_file(config_file) + + if options: + cfg.merge_from_list(options) + + cfg.freeze() + + +if __name__ == "__main__": + import sys + + with open(sys.argv[1], "w") as f: + print(_C, file=f) diff --git a/experiments/interpretation/dutchf3_patch/local/logging.conf b/experiments/interpretation/dutchf3_patch/local/logging.conf new file mode 100644 index 00000000..56334fc4 --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/local/logging.conf @@ -0,0 +1,34 @@ +[loggers] +keys=root,__main__,event_handlers + +[handlers] +keys=consoleHandler + +[formatters] +keys=simpleFormatter + +[logger_root] +level=INFO +handlers=consoleHandler + +[logger___main__] +level=INFO +handlers=consoleHandler +qualname=__main__ +propagate=0 + +[logger_event_handlers] +level=INFO 
+handlers=consoleHandler +qualname=event_handlers +propagate=0 + +[handler_consoleHandler] +class=StreamHandler +level=INFO +formatter=simpleFormatter +args=(sys.stdout,) + +[formatter_simpleFormatter] +format=%(asctime)s - %(name)s - %(levelname)s - %(message)s + diff --git a/experiments/interpretation/dutchf3_patch/local/test.py b/experiments/interpretation/dutchf3_patch/local/test.py new file mode 100644 index 00000000..60a9ca00 --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/local/test.py @@ -0,0 +1,407 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# commitHash: c76bf579a0d5090ebd32426907d051d499f3e847 +# url: https://github.com/olivesgatech/facies_classification_benchmark +# +# To Test: +# python test.py TRAIN.END_EPOCH 1 TRAIN.SNAPSHOTS 1 --cfg "configs/hrnet.yaml" --debug +# +# /* spell-checker: disable */ +""" +Modified version of the Alaudah testing script +Runs only on single GPU + +Estimated time to run on single V100: 5 hours +""" + +import itertools +import logging +import logging.config +import os +from os import path + +import cv2 +import fire +import numpy as np +import torch +import torch.nn.functional as F +from albumentations import Compose, Normalize, PadIfNeeded, Resize +from cv_lib.utils import load_log_configuration +from cv_lib.segmentation import models +from deepseismic_interpretation.dutchf3.data import ( + add_patch_depth_channels, + get_seismic_labels, + get_test_loader, +) +from default import _C as config +from default import update_config +from toolz import compose, curry, itertoolz, pipe +from torch.utils import data +from toolz import take + + +_CLASS_NAMES = [ + "upper_ns", + "middle_ns", + "lower_ns", + "rijnland_chalk", + "scruff", + "zechstein", +] + + +class runningScore(object): + def __init__(self, n_classes): + self.n_classes = n_classes + self.confusion_matrix = np.zeros((n_classes, n_classes)) + + def _fast_hist(self, label_true, label_pred, n_class): + mask = (label_true >= 0) & (label_true < n_class) + hist = np.bincount(n_class * label_true[mask].astype(int) + label_pred[mask], minlength=n_class ** 2,).reshape( + n_class, n_class + ) + return hist + + def update(self, label_trues, label_preds): + for lt, lp in zip(label_trues, label_preds): + self.confusion_matrix += self._fast_hist(lt.flatten(), lp.flatten(), self.n_classes) + + def get_scores(self): + """Returns accuracy score evaluation result. 
+ - overall accuracy + - mean accuracy + - mean IU + - fwavacc + """ + hist = self.confusion_matrix + acc = np.diag(hist).sum() / hist.sum() + acc_cls = np.diag(hist) / hist.sum(axis=1) + mean_acc_cls = np.nanmean(acc_cls) + iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)) + mean_iu = np.nanmean(iu) + freq = hist.sum(axis=1) / hist.sum() # fraction of the pixels that come from each class + fwavacc = (freq[freq > 0] * iu[freq > 0]).sum() + cls_iu = dict(zip(range(self.n_classes), iu)) + + return ( + { + "Pixel Acc: ": acc, + "Class Accuracy: ": acc_cls, + "Mean Class Acc: ": mean_acc_cls, + "Freq Weighted IoU: ": fwavacc, + "Mean IoU: ": mean_iu, + "confusion_matrix": self.confusion_matrix, + }, + cls_iu, + ) + + def reset(self): + self.confusion_matrix = np.zeros((self.n_classes, self.n_classes)) + + +def _transform_CHW_to_HWC(numpy_array): + return np.moveaxis(numpy_array, 0, -1) + + +def _transform_HWC_to_CHW(numpy_array): + return np.moveaxis(numpy_array, -1, 0) + + +@curry +def _apply_augmentation3D(aug, numpy_array): + assert len(numpy_array.shape) == 3, "This method only accepts 3D arrays" + patch = _transform_CHW_to_HWC(numpy_array) + patch = aug(image=patch)["image"] + return _transform_HWC_to_CHW(patch) + + +@curry +def _apply_augmentation2D(aug, numpy_array): + assert len(numpy_array.shape) == 2, "This method only accepts 2D arrays" + return aug(image=numpy_array)["image"] + + +_AUGMENTATION = {3: _apply_augmentation3D, 2: _apply_augmentation2D} + + +@curry +def _apply_augmentation(aug, image): + if isinstance(image, torch.Tensor): + image = image.numpy() + + if aug is not None: + return _AUGMENTATION[len(image.shape)](aug, image) + else: + return image + + +def _add_depth(image): + if isinstance(image, torch.Tensor): + image = image.numpy() + return add_patch_depth_channels(image) + + +def _to_torch(image): + if isinstance(image, torch.Tensor): + return image + else: + return torch.from_numpy(image).to(torch.float32) + + +def _expand_dims_if_necessary(torch_tensor): + if len(torch_tensor.shape) == 2: + return torch_tensor.unsqueeze(dim=0) + else: + return torch_tensor + + +@curry +def _extract_patch(hdx, wdx, ps, patch_size, img_p): + if len(img_p.shape) == 2: # 2D + return img_p[hdx + ps : hdx + ps + patch_size, wdx + ps : wdx + ps + patch_size] + else: # 3D + return img_p[ + :, hdx + ps : hdx + ps + patch_size, wdx + ps : wdx + ps + patch_size, + ] + + +def _compose_processing_pipeline(depth, aug=None): + steps = [] + if aug is not None: + steps.append(_apply_augmentation(aug)) + + if depth == "patch": + steps.append(_add_depth) + + steps.append(_to_torch) + steps.append(_expand_dims_if_necessary) + steps.reverse() + return compose(*steps) + + +def _generate_batches(h, w, ps, patch_size, stride, batch_size=64): + hdc_wdx_generator = itertools.product(range(0, h - patch_size + ps, stride), range(0, w - patch_size + ps, stride),) + for batch_indexes in itertoolz.partition_all(batch_size, hdc_wdx_generator): + yield batch_indexes + + +@curry +def _output_processing_pipeline(config, output): + output = output.unsqueeze(0) + _, _, h, w = output.shape + if config.TEST.POST_PROCESSING.SIZE != h or config.TEST.POST_PROCESSING.SIZE != w: + output = F.interpolate( + output, size=(config.TEST.POST_PROCESSING.SIZE, config.TEST.POST_PROCESSING.SIZE,), mode="bilinear", + ) + + if config.TEST.POST_PROCESSING.CROP_PIXELS > 0: + _, _, h, w = output.shape + output = output[ + :, + :, + config.TEST.POST_PROCESSING.CROP_PIXELS : h - 
config.TEST.POST_PROCESSING.CROP_PIXELS, + config.TEST.POST_PROCESSING.CROP_PIXELS : w - config.TEST.POST_PROCESSING.CROP_PIXELS, + ] + return output.squeeze() + + +def _patch_label_2d( + model, img, pre_processing, output_processing, patch_size, stride, batch_size, device, num_classes, +): + """Processes a whole section + """ + img = torch.squeeze(img) + h, w = img.shape[-2], img.shape[-1] # height and width + + # Pad image with patch_size/2: + ps = int(np.floor(patch_size / 2)) # pad size + img_p = F.pad(img, pad=(ps, ps, ps, ps), mode="constant", value=0) + output_p = torch.zeros([1, num_classes, h + 2 * ps, w + 2 * ps]) + + # generate output: + for batch_indexes in _generate_batches(h, w, ps, patch_size, stride, batch_size=batch_size): + batch = torch.stack( + [pipe(img_p, _extract_patch(hdx, wdx, ps, patch_size), pre_processing,) for hdx, wdx in batch_indexes], + dim=0, + ) + + model_output = model(batch.to(device)) + for (hdx, wdx), output in zip(batch_indexes, model_output.detach().cpu()): + output = output_processing(output) + output_p[:, :, hdx + ps : hdx + ps + patch_size, wdx + ps : wdx + ps + patch_size,] += output + + # crop the output_p in the middle + output = output_p[:, :, ps:-ps, ps:-ps] + return output + + +@curry +def to_image(label_mask, n_classes=6): + label_colours = get_seismic_labels() + r = label_mask.copy() + g = label_mask.copy() + b = label_mask.copy() + for ll in range(0, n_classes): + r[label_mask == ll] = label_colours[ll, 0] + g[label_mask == ll] = label_colours[ll, 1] + b[label_mask == ll] = label_colours[ll, 2] + rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], label_mask.shape[2], 3)) + rgb[:, :, :, 0] = r + rgb[:, :, :, 1] = g + rgb[:, :, :, 2] = b + return rgb + + +def _evaluate_split( + split, section_aug, model, pre_processing, output_processing, device, running_metrics_overall, config, debug=False +): + logger = logging.getLogger(__name__) + + TestSectionLoader = get_test_loader(config) + test_set = TestSectionLoader(config.DATASET.ROOT, split=split, is_transform=True, augmentations=section_aug,) + + n_classes = test_set.n_classes + + test_loader = data.DataLoader(test_set, batch_size=1, num_workers=config.WORKERS, shuffle=False) + + if debug: + logger.info("Running in Debug/Test mode") + test_loader = take(1, test_loader) + + running_metrics_split = runningScore(n_classes) + + # testing mode: + with torch.no_grad(): # operations inside don't track history + model.eval() + total_iteration = 0 + for i, (images, labels) in enumerate(test_loader): + logger.info(f"split: {split}, section: {i}") + total_iteration = total_iteration + 1 + + outputs = _patch_label_2d( + model, + images, + pre_processing, + output_processing, + config.TRAIN.PATCH_SIZE, + config.TEST.TEST_STRIDE, + config.VALIDATION.BATCH_SIZE_PER_GPU, + device, + n_classes, + ) + + pred = outputs.detach().max(1)[1].numpy() + gt = labels.numpy() + running_metrics_split.update(gt, pred) + running_metrics_overall.update(gt, pred) + + # get scores + score, class_iou = running_metrics_split.get_scores() + + # Log split results + logger.info(f'Pixel Acc: {score["Pixel Acc: "]:.3f}') + for cdx, class_name in enumerate(_CLASS_NAMES): + logger.info(f' {class_name}_accuracy {score["Class Accuracy: "][cdx]:.3f}') + + logger.info(f'Mean Class Acc: {score["Mean Class Acc: "]:.3f}') + logger.info(f'Freq Weighted IoU: {score["Freq Weighted IoU: "]:.3f}') + logger.info(f'Mean IoU: {score["Mean IoU: "]:0.3f}') + running_metrics_split.reset() + + +def _write_section_file(labels, section_file): + # 
define indices of the array + irange, xrange, depth = labels.shape + + if config.TEST.INLINE: + i_list = list(range(irange)) + i_list = ["i_" + str(inline) for inline in i_list] + else: + i_list = [] + + if config.TEST.CROSSLINE: + x_list = list(range(xrange)) + x_list = ["x_" + str(crossline) for crossline in x_list] + else: + x_list = [] + + list_test = i_list + x_list + + file_object = open(section_file, "w") + file_object.write("\n".join(list_test)) + file_object.close() + + +def test(*options, cfg=None, debug=False): + update_config(config, options=options, config_file=cfg) + n_classes = config.DATASET.NUM_CLASSES + + # Start logging + load_log_configuration(config.LOG_CONFIG) + logger = logging.getLogger(__name__) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + log_dir, model_name = os.path.split(config.TEST.MODEL_PATH) + + # load model: + model = getattr(models, config.MODEL.NAME).get_seg_model(config) + model.load_state_dict(torch.load(config.TEST.MODEL_PATH), strict=False) + model = model.to(device) # Send to GPU if available + + running_metrics_overall = runningScore(n_classes) + + # Augmentation + section_aug = Compose([Normalize(mean=(config.TRAIN.MEAN,), std=(config.TRAIN.STD,), max_pixel_value=1,)]) + + patch_aug = Compose( + [ + Resize( + config.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT, config.TRAIN.AUGMENTATIONS.RESIZE.WIDTH, always_apply=True, + ), + PadIfNeeded( + min_height=config.TRAIN.AUGMENTATIONS.PAD.HEIGHT, + min_width=config.TRAIN.AUGMENTATIONS.PAD.WIDTH, + border_mode=cv2.BORDER_CONSTANT, + always_apply=True, + mask_value=255, + ), + ] + ) + + pre_processing = _compose_processing_pipeline(config.TRAIN.DEPTH, aug=patch_aug) + output_processing = _output_processing_pipeline(config) + + splits = ["test1", "test2"] if "Both" in config.TEST.SPLIT else [config.TEST.SPLIT] + for sdx, split in enumerate(splits): + labels = np.load(path.join(config.DATASET.ROOT, "test_once", split + "_labels.npy")) + section_file = path.join(config.DATASET.ROOT, "splits", "section_" + split + ".txt") + _write_section_file(labels, section_file) + _evaluate_split( + split, + section_aug, + model, + pre_processing, + output_processing, + device, + running_metrics_overall, + config, + debug=debug, + ) + + # FINAL TEST RESULTS: + score, class_iou = running_metrics_overall.get_scores() + + logger.info("--------------- FINAL RESULTS -----------------") + logger.info(f'Pixel Acc: {score["Pixel Acc: "]:.3f}') + for cdx, class_name in enumerate(_CLASS_NAMES): + logger.info(f' {class_name}_accuracy {score["Class Accuracy: "][cdx]:.3f}') + logger.info(f'Mean Class Acc: {score["Mean Class Acc: "]:.3f}') + logger.info(f'Freq Weighted IoU: {score["Freq Weighted IoU: "]:.3f}') + logger.info(f'Mean IoU: {score["Mean IoU: "]:0.3f}') + + # Save confusion matrix: + confusion = score["confusion_matrix"] + np.savetxt(path.join(log_dir, "confusion.csv"), confusion, delimiter=" ") + + +if __name__ == "__main__": + fire.Fire(test) diff --git a/experiments/interpretation/dutchf3_patch/local/test.sh b/experiments/interpretation/dutchf3_patch/local/test.sh new file mode 100644 index 00000000..ad68cf2e --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/local/test.sh @@ -0,0 +1,2 @@ +#!/bin/bash +python test.py --cfg "configs/seresnet_unet.yaml" \ No newline at end of file diff --git a/experiments/interpretation/dutchf3_patch/local/train.py b/experiments/interpretation/dutchf3_patch/local/train.py new file mode 100644 index 00000000..fd8324c9 --- /dev/null +++ 
b/experiments/interpretation/dutchf3_patch/local/train.py
@@ -0,0 +1,297 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+#
+# To Test:
+# python train.py TRAIN.END_EPOCH 1 TRAIN.SNAPSHOTS 1 --cfg "configs/hrnet.yaml" --debug
+#
+# /* spell-checker: disable */
+"""Train models on Dutch F3 dataset
+
+Trains models using PyTorch
+Uses a warmup schedule that then goes into a cyclic learning rate
+
+Time to run on single V100 for 300 epochs: 4.5 days
+"""
+
+import logging
+import logging.config
+from os import path
+
+import cv2
+import fire
+import numpy as np
+import torch
+from albumentations import Compose, HorizontalFlip, Normalize, PadIfNeeded, Resize
+from ignite.contrib.handlers import CosineAnnealingScheduler
+from ignite.engine import Events
+from ignite.metrics import Loss
+from ignite.utils import convert_tensor
+from toolz import compose
+from torch.utils import data
+
+from deepseismic_interpretation.dutchf3.data import get_patch_loader, decode_segmap
+from cv_lib.utils import load_log_configuration
+from cv_lib.event_handlers import (
+    SnapshotHandler,
+    logging_handlers,
+    tensorboard_handlers,
+)
+from cv_lib.event_handlers.logging_handlers import Evaluator
+from cv_lib.event_handlers.tensorboard_handlers import (
+    create_image_writer,
+    create_summary_writer,
+)
+from cv_lib.segmentation import models, extract_metric_from
+from cv_lib.segmentation.dutchf3.engine import (
+    create_supervised_evaluator,
+    create_supervised_trainer,
+)
+
+from cv_lib.segmentation.metrics import (
+    pixelwise_accuracy,
+    class_accuracy,
+    mean_class_accuracy,
+    class_iou,
+    mean_iou,
+)
+
+from cv_lib.segmentation.dutchf3.utils import (
+    current_datetime,
+    generate_path,
+    git_branch,
+    git_hash,
+    np_to_tb,
+)
+
+from default import _C as config
+from default import update_config
+from toolz import take
+
+
+def prepare_batch(batch, device=None, non_blocking=False):
+    x, y = batch
+    return (
+        convert_tensor(x, device=device, non_blocking=non_blocking),
+        convert_tensor(y, device=device, non_blocking=non_blocking),
+    )
+
+
+def run(*options, cfg=None, debug=False):
+    """Run training and validation of model
+
+    Notes:
+        Options can be passed in via the options argument and loaded from the cfg file
+        Options from default.py will be overridden by options loaded from the cfg file
+        Options passed in via the options argument will override options loaded from the cfg file
+
+    Args:
+        *options (str, int, optional): Options used to override what is loaded from the
+                                      config. To see what options are available consult
+                                      default.py
+        cfg (str, optional): Location of config file to load. Defaults to None.
+ debug (bool): Places scripts in debug/test mode and only executes a few iterations + """ + + update_config(config, options=options, config_file=cfg) + + # Start logging + load_log_configuration(config.LOG_CONFIG) + logger = logging.getLogger(__name__) + logger.debug(config.WORKERS) + scheduler_step = config.TRAIN.END_EPOCH // config.TRAIN.SNAPSHOTS + torch.backends.cudnn.benchmark = config.CUDNN.BENCHMARK + + torch.manual_seed(config.SEED) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(config.SEED) + np.random.seed(seed=config.SEED) + + # Setup Augmentations + basic_aug = Compose( + [ + Normalize(mean=(config.TRAIN.MEAN,), std=(config.TRAIN.STD,), max_pixel_value=1), + Resize( + config.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT, config.TRAIN.AUGMENTATIONS.RESIZE.WIDTH, always_apply=True, + ), + PadIfNeeded( + min_height=config.TRAIN.AUGMENTATIONS.PAD.HEIGHT, + min_width=config.TRAIN.AUGMENTATIONS.PAD.WIDTH, + border_mode=cv2.BORDER_CONSTANT, + always_apply=True, + mask_value=255, + ), + ] + ) + if config.TRAIN.AUGMENTATION: + train_aug = Compose([basic_aug, HorizontalFlip(p=0.5)]) + val_aug = basic_aug + else: + train_aug = val_aug = basic_aug + + TrainPatchLoader = get_patch_loader(config) + + train_set = TrainPatchLoader( + config.DATASET.ROOT, + split="train", + is_transform=True, + stride=config.TRAIN.STRIDE, + patch_size=config.TRAIN.PATCH_SIZE, + augmentations=train_aug, + ) + + val_set = TrainPatchLoader( + config.DATASET.ROOT, + split="val", + is_transform=True, + stride=config.TRAIN.STRIDE, + patch_size=config.TRAIN.PATCH_SIZE, + augmentations=val_aug, + ) + + n_classes = train_set.n_classes + + train_loader = data.DataLoader( + train_set, batch_size=config.TRAIN.BATCH_SIZE_PER_GPU, num_workers=config.WORKERS, shuffle=True, + ) + val_loader = data.DataLoader(val_set, batch_size=config.VALIDATION.BATCH_SIZE_PER_GPU, num_workers=config.WORKERS,) + + model = getattr(models, config.MODEL.NAME).get_seg_model(config) + + device = "cpu" + if torch.cuda.is_available(): + device = "cuda" + model = model.to(device) # Send to GPU + + optimizer = torch.optim.SGD( + model.parameters(), + lr=config.TRAIN.MAX_LR, + momentum=config.TRAIN.MOMENTUM, + weight_decay=config.TRAIN.WEIGHT_DECAY, + ) + + try: + output_dir = generate_path(config.OUTPUT_DIR, git_branch(), git_hash(), config.MODEL.NAME, current_datetime(),) + except TypeError: + output_dir = generate_path(config.OUTPUT_DIR, config.MODEL.NAME, current_datetime(),) + + summary_writer = create_summary_writer(log_dir=path.join(output_dir, config.LOG_DIR)) + + snapshot_duration = scheduler_step * len(train_loader) + scheduler = CosineAnnealingScheduler(optimizer, "lr", config.TRAIN.MAX_LR, config.TRAIN.MIN_LR, snapshot_duration) + + # weights are inversely proportional to the frequency of the classes in the + # training set + class_weights = torch.tensor(config.DATASET.CLASS_WEIGHTS, device=device, requires_grad=False) + + criterion = torch.nn.CrossEntropyLoss(weight=class_weights, ignore_index=255, reduction="mean") + + trainer = create_supervised_trainer(model, optimizer, criterion, prepare_batch, device=device) + + trainer.add_event_handler(Events.ITERATION_STARTED, scheduler) + + trainer.add_event_handler( + Events.ITERATION_COMPLETED, logging_handlers.log_training_output(log_interval=config.PRINT_FREQ), + ) + trainer.add_event_handler(Events.EPOCH_STARTED, logging_handlers.log_lr(optimizer)) + trainer.add_event_handler( + Events.EPOCH_STARTED, tensorboard_handlers.log_lr(summary_writer, optimizer, "epoch"), + ) + 
trainer.add_event_handler( + Events.ITERATION_COMPLETED, tensorboard_handlers.log_training_output(summary_writer), + ) + + def _select_pred_and_mask(model_out_dict): + return (model_out_dict["y_pred"].squeeze(), model_out_dict["mask"].squeeze()) + + evaluator = create_supervised_evaluator( + model, + prepare_batch, + metrics={ + "nll": Loss(criterion, output_transform=_select_pred_and_mask), + "pixacc": pixelwise_accuracy(n_classes, output_transform=_select_pred_and_mask, device=device), + "cacc": class_accuracy(n_classes, output_transform=_select_pred_and_mask), + "mca": mean_class_accuracy(n_classes, output_transform=_select_pred_and_mask), + "ciou": class_iou(n_classes, output_transform=_select_pred_and_mask), + "mIoU": mean_iou(n_classes, output_transform=_select_pred_and_mask), + }, + device=device, + ) + + # Set the validation run to start on the epoch completion of the training run + if debug: + logger.info("Running Validation in Debug/Test mode") + val_loader = take(3, val_loader) + + trainer.add_event_handler(Events.EPOCH_COMPLETED, Evaluator(evaluator, val_loader)) + + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + logging_handlers.log_metrics( + "Validation results", + metrics_dict={ + "nll": "Avg loss :", + "pixacc": "Pixelwise Accuracy :", + "mca": "Avg Class Accuracy :", + "mIoU": "Avg Class IoU :", + }, + ), + ) + + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + tensorboard_handlers.log_metrics( + summary_writer, + trainer, + "epoch", + metrics_dict={ + "mIoU": "Validation/mIoU", + "nll": "Validation/Loss", + "mca": "Validation/MCA", + "pixacc": "Validation/Pixel_Acc", + }, + ), + ) + + def _select_max(pred_tensor): + return pred_tensor.max(1)[1] + + def _tensor_to_numpy(pred_tensor): + return pred_tensor.squeeze().cpu().numpy() + + transform_func = compose(np_to_tb, decode_segmap(n_classes=n_classes), _tensor_to_numpy) + + transform_pred = compose(transform_func, _select_max) + + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, create_image_writer(summary_writer, "Validation/Image", "image"), + ) + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + create_image_writer(summary_writer, "Validation/Mask", "mask", transform_func=transform_func), + ) + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + create_image_writer(summary_writer, "Validation/Pred", "y_pred", transform_func=transform_pred), + ) + + def snapshot_function(): + return (trainer.state.iteration % snapshot_duration) == 0 + + checkpoint_handler = SnapshotHandler( + path.join(output_dir, config.TRAIN.MODEL_DIR), + config.MODEL.NAME, + extract_metric_from("mIoU"), + snapshot_function, + ) + evaluator.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {"model": model}) + + logger.info("Starting training") + if debug: + logger.info("Running Training in Debug/Test mode") + train_loader = take(3, train_loader) + + trainer.run(train_loader, max_epochs=config.TRAIN.END_EPOCH) + + +if __name__ == "__main__": + fire.Fire(run) diff --git a/experiments/interpretation/dutchf3_patch/local/train.sh b/experiments/interpretation/dutchf3_patch/local/train.sh new file mode 100755 index 00000000..b9bb9338 --- /dev/null +++ b/experiments/interpretation/dutchf3_patch/local/train.sh @@ -0,0 +1,2 @@ +#!/bin/bash +python train.py --cfg "configs/hrnet.yaml" \ No newline at end of file diff --git a/experiments/interpretation/dutchf3_section/README.md b/experiments/interpretation/dutchf3_section/README.md new file mode 100644 index 00000000..66d3cfcd --- /dev/null +++ 
b/experiments/interpretation/dutchf3_section/README.md
@@ -0,0 +1,25 @@
+## F3 Netherlands Section Experiments
+In this folder are training and testing scripts that work on the F3 Netherlands dataset.
+You can run one model on this dataset:
+* [SectionDeconvNet-Skip](local/configs/section_deconvnet_skip.yaml)
+
+This model takes 2D sections from the dataset as input, whether these are inlines or crosslines, and provides predictions for the whole section.
+
+To understand the configuration files and the default parameters, refer to this [section in the top level README](../../../README.md#configuration-files).
+
+### Setup
+
+Please set up a conda environment following the instructions in the top-level [README.md](../../../README.md#setting-up-environment) file.
+Also follow instructions for [downloading and preparing](../../../README.md#f3-Netherlands) the data.
+
+### Running experiments
+
+Now you're all set to run training and testing experiments on the F3 Netherlands dataset. Please start from the `train.sh` and `test.sh` scripts under the `local/` directory, which invoke the corresponding python scripts. Take a look at the project configuration (e.g. in `default.py`) for experiment options and modify it if necessary.
+
+### Monitoring progress with TensorBoard
+- from this directory, run `tensorboard --logdir='output'` (all runtime logging information is
+written to the `output` folder)
+- open a web-browser and go to either vmpublicip:6006 if running remotely or localhost:6006 if running locally
+> **NOTE**: If running remotely, remember that the port must be open and accessible.
+
+More information on TensorBoard can be found [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard#launching_tensorboard).
diff --git a/experiments/interpretation/dutchf3_section/local/configs/section_deconvnet_skip.yaml b/experiments/interpretation/dutchf3_section/local/configs/section_deconvnet_skip.yaml
new file mode 100644
index 00000000..9ce3937e
--- /dev/null
+++ b/experiments/interpretation/dutchf3_section/local/configs/section_deconvnet_skip.yaml
@@ -0,0 +1,45 @@
+CUDNN:
+  BENCHMARK: true
+  DETERMINISTIC: false
+  ENABLED: true
+GPUS: (0,)
+OUTPUT_DIR: 'output'
+LOG_DIR: 'log'
+WORKERS: 4
+PRINT_FREQ: 10
+LOG_CONFIG: logging.conf
+SEED: 2019
+
+DATASET:
+  NUM_CLASSES: 6
+  ROOT: /mnt/dutchf3
+  CLASS_WEIGHTS: [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852]
+
+MODEL:
+  NAME: section_deconvnet_skip
+  IN_CHANNELS: 1
+
+TRAIN:
+  BATCH_SIZE_PER_GPU: 16
+  BEGIN_EPOCH: 0
+  END_EPOCH: 300
+  MIN_LR: 0.001
+  MAX_LR: 0.02
+  MOMENTUM: 0.9
+  WEIGHT_DECAY: 0.0001
+  SNAPSHOTS: 5
+  AUGMENTATION: True
+  DEPTH: "none" # Options are 'none', 'patch' and 'section'
+  MEAN: 0.0009997 # 0.0009996710808862074
+  STD: 0.20977 # 0.20976548783479299
+  MODEL_DIR: "models"
+
+VALIDATION:
+  BATCH_SIZE_PER_GPU: 32
+
+TEST:
+  MODEL_PATH: ""
+  TEST_STRIDE: 10
+  SPLIT: 'Both' # Can be 'Both', 'test1' or 'test2'
+  INLINE: True
+  CROSSLINE: True
\ No newline at end of file
diff --git a/experiments/interpretation/dutchf3_section/local/default.py b/experiments/interpretation/dutchf3_section/local/default.py
new file mode 100644
index 00000000..5e296295
--- /dev/null
+++ b/experiments/interpretation/dutchf3_section/local/default.py
@@ -0,0 +1,92 @@
+# ------------------------------------------------------------------------------
+# Copyright (c) Microsoft
+# Licensed under the MIT License.
+# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from yacs.config import CfgNode as CN + +_C = CN() + + +_C.OUTPUT_DIR = "output" # Base directory for all output (logs, models, etc) +_C.LOG_DIR = "" # This will be a subdirectory inside OUTPUT_DIR +_C.GPUS = (0,) +_C.WORKERS = 4 +_C.PRINT_FREQ = 20 +_C.AUTO_RESUME = False +_C.PIN_MEMORY = True +_C.LOG_CONFIG = "./logging.conf" # Logging config file relative to the experiment +_C.SEED = 42 + +# Cudnn related params +_C.CUDNN = CN() +_C.CUDNN.BENCHMARK = True +_C.CUDNN.DETERMINISTIC = False +_C.CUDNN.ENABLED = True + +# DATASET related params +_C.DATASET = CN() +_C.DATASET.ROOT = "/mnt/dutchf3" +_C.DATASET.NUM_CLASSES = 6 +_C.DATASET.CLASS_WEIGHTS = [0.7151, 0.8811, 0.5156, 0.9346, 0.9683, 0.9852] + +# common params for NETWORK +_C.MODEL = CN() +_C.MODEL.NAME = "section_deconvnet_skip" +_C.MODEL.IN_CHANNELS = 1 +_C.MODEL.PRETRAINED = "" +_C.MODEL.EXTRA = CN(new_allowed=True) + +# training +_C.TRAIN = CN() +_C.TRAIN.MIN_LR = 0.001 +_C.TRAIN.MAX_LR = 0.01 +_C.TRAIN.MOMENTUM = 0.9 +_C.TRAIN.BEGIN_EPOCH = 0 +_C.TRAIN.END_EPOCH = 100 +_C.TRAIN.BATCH_SIZE_PER_GPU = 16 +_C.TRAIN.WEIGHT_DECAY = 0.0001 +_C.TRAIN.SNAPSHOTS = 5 +_C.TRAIN.MODEL_DIR = "models" # This will be a subdirectory inside OUTPUT_DIR +_C.TRAIN.AUGMENTATION = True +_C.TRAIN.MEAN = 0.0009997 # 0.0009996710808862074 +_C.TRAIN.STD = 0.20977 # 0.20976548783479299 +_C.TRAIN.DEPTH = "none" # Options are 'none', 'patch' and 'section' +# None adds no depth information and the num of channels remains at 1 +# Patch adds depth per patch so is simply the height of that patch from 0 to 1, channels=3 +# Section adds depth per section so contains depth information for the whole section, channels=3 + +# validation +_C.VALIDATION = CN() +_C.VALIDATION.BATCH_SIZE_PER_GPU = 16 + +# TEST +_C.TEST = CN() +_C.TEST.MODEL_PATH = "" +_C.TEST.TEST_STRIDE = 10 +_C.TEST.SPLIT = "Both" # Can be Both, Test1, Test2 +_C.TEST.INLINE = True +_C.TEST.CROSSLINE = True + + +def update_config(cfg, options=None, config_file=None): + cfg.defrost() + + if config_file: + cfg.merge_from_file(config_file) + + if options: + cfg.merge_from_list(options) + + cfg.freeze() + + +if __name__ == "__main__": + import sys + + with open(sys.argv[1], "w") as f: + print(_C, file=f) diff --git a/experiments/interpretation/dutchf3_section/local/logging.conf b/experiments/interpretation/dutchf3_section/local/logging.conf new file mode 100644 index 00000000..56334fc4 --- /dev/null +++ b/experiments/interpretation/dutchf3_section/local/logging.conf @@ -0,0 +1,34 @@ +[loggers] +keys=root,__main__,event_handlers + +[handlers] +keys=consoleHandler + +[formatters] +keys=simpleFormatter + +[logger_root] +level=INFO +handlers=consoleHandler + +[logger___main__] +level=INFO +handlers=consoleHandler +qualname=__main__ +propagate=0 + +[logger_event_handlers] +level=INFO +handlers=consoleHandler +qualname=event_handlers +propagate=0 + +[handler_consoleHandler] +class=StreamHandler +level=INFO +formatter=simpleFormatter +args=(sys.stdout,) + +[formatter_simpleFormatter] +format=%(asctime)s - %(name)s - %(levelname)s - %(message)s + diff --git a/experiments/interpretation/dutchf3_section/local/test.py b/experiments/interpretation/dutchf3_section/local/test.py new file mode 100644 index 00000000..5b4d6858 --- /dev/null +++ b/experiments/interpretation/dutchf3_section/local/test.py @@ -0,0 +1,204 @@ +# 
Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# commitHash: c76bf579a0d5090ebd32426907d051d499f3e847 +# url: https://github.com/olivesgatech/facies_classification_benchmark + +""" +Modified version of the Alaudah testing script +# TODO: Needs to be improved. Needs to be able to run across multiple GPUs and better +# factoring around the loader +""" + +import logging +import logging.config +import os +from os import path + +import fire +import numpy as np +import torch +from albumentations import Compose, Normalize +from cv_lib.utils import load_log_configuration +from cv_lib.segmentation import models + +from deepseismic_interpretation.dutchf3.data import get_test_loader +from default import _C as config +from default import update_config +from torch.utils import data +from toolz import take + + +_CLASS_NAMES = [ + "upper_ns", + "middle_ns", + "lower_ns", + "rijnland_chalk", + "scruff", + "zechstein", +] + + +class runningScore(object): + def __init__(self, n_classes): + self.n_classes = n_classes + self.confusion_matrix = np.zeros((n_classes, n_classes)) + + def _fast_hist(self, label_true, label_pred, n_class): + mask = (label_true >= 0) & (label_true < n_class) + hist = np.bincount(n_class * label_true[mask].astype(int) + label_pred[mask], minlength=n_class ** 2,).reshape( + n_class, n_class + ) + return hist + + def update(self, label_trues, label_preds): + for lt, lp in zip(label_trues, label_preds): + self.confusion_matrix += self._fast_hist(lt.flatten(), lp.flatten(), self.n_classes) + + def get_scores(self): + """Returns accuracy score evaluation result. + - overall accuracy + - mean accuracy + - mean IU + - fwavacc + """ + hist = self.confusion_matrix + acc = np.diag(hist).sum() / hist.sum() + acc_cls = np.diag(hist) / hist.sum(axis=1) + mean_acc_cls = np.nanmean(acc_cls) + iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)) + mean_iu = np.nanmean(iu) + freq = hist.sum(axis=1) / hist.sum() # fraction of the pixels that come from each class + fwavacc = (freq[freq > 0] * iu[freq > 0]).sum() + cls_iu = dict(zip(range(self.n_classes), iu)) + + return ( + { + "Pixel Acc: ": acc, + "Class Accuracy: ": acc_cls, + "Mean Class Acc: ": mean_acc_cls, + "Freq Weighted IoU: ": fwavacc, + "Mean IoU: ": mean_iu, + "confusion_matrix": self.confusion_matrix, + }, + cls_iu, + ) + + def reset(self): + self.confusion_matrix = np.zeros((self.n_classes, self.n_classes)) + + +def _evaluate_split(split, section_aug, model, device, running_metrics_overall, config, debug=False): + logger = logging.getLogger(__name__) + + TestSectionLoader = get_test_loader(config) + test_set = TestSectionLoader( + data_dir=config.DATASET.ROOT, split=split, is_transform=True, augmentations=section_aug, + ) + + n_classes = test_set.n_classes + + test_loader = data.DataLoader(test_set, batch_size=1, num_workers=config.WORKERS, shuffle=False) + if debug: + logger.info("Running in Debug/Test mode") + test_loader = take(1, test_loader) + + running_metrics_split = runningScore(n_classes) + + # testing mode: + with torch.no_grad(): # operations inside don't track history + model.eval() + total_iteration = 0 + for i, (images, labels) in enumerate(test_loader): + logger.info(f"split: {split}, section: {i}") + total_iteration = total_iteration + 1 + + outputs = model(images.to(device)) + + pred = outputs.detach().max(1)[1].cpu().numpy() + gt = labels.numpy() + running_metrics_split.update(gt, pred) + running_metrics_overall.update(gt, pred) + + # get scores + score, class_iou = 
running_metrics_split.get_scores() + + # Log split results + logger.info(f'Pixel Acc: {score["Pixel Acc: "]:.3f}') + for cdx, class_name in enumerate(_CLASS_NAMES): + logger.info(f' {class_name}_accuracy {score["Class Accuracy: "][cdx]:.3f}') + + logger.info(f'Mean Class Acc: {score["Mean Class Acc: "]:.3f}') + logger.info(f'Freq Weighted IoU: {score["Freq Weighted IoU: "]:.3f}') + logger.info(f'Mean IoU: {score["Mean IoU: "]:0.3f}') + running_metrics_split.reset() + + +def _write_section_file(labels, section_file): + # define indices of the array + irange, xrange, depth = labels.shape + + if config.TEST.INLINE: + i_list = list(range(irange)) + i_list = ["i_" + str(inline) for inline in i_list] + else: + i_list = [] + + if config.TEST.CROSSLINE: + x_list = list(range(xrange)) + x_list = ["x_" + str(crossline) for crossline in x_list] + else: + x_list = [] + + list_test = i_list + x_list + + file_object = open(section_file, "w") + file_object.write("\n".join(list_test)) + file_object.close() + + +def test(*options, cfg=None, debug=False): + update_config(config, options=options, config_file=cfg) + n_classes = config.DATASET.NUM_CLASSES + + # Start logging + load_log_configuration(config.LOG_CONFIG) + logger = logging.getLogger(__name__) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + log_dir, _ = os.path.split(config.TEST.MODEL_PATH) + + # load model: + model = getattr(models, config.MODEL.NAME).get_seg_model(config) + model.load_state_dict(torch.load(config.TEST.MODEL_PATH), strict=False) + model = model.to(device) # Send to GPU if available + + running_metrics_overall = runningScore(n_classes) + + # Augmentation + section_aug = Compose([Normalize(mean=(config.TRAIN.MEAN,), std=(config.TRAIN.STD,), max_pixel_value=1,)]) + + splits = ["test1", "test2"] if "Both" in config.TEST.SPLIT else [config.TEST.SPLIT] + + for sdx, split in enumerate(splits): + labels = np.load(path.join(config.DATASET.ROOT, "test_once", split + "_labels.npy")) + section_file = path.join(config.DATASET.ROOT, "splits", "section_" + split + ".txt") + _write_section_file(labels, section_file) + _evaluate_split(split, section_aug, model, device, running_metrics_overall, config, debug=debug) + + # FINAL TEST RESULTS: + score, class_iou = running_metrics_overall.get_scores() + + logger.info("--------------- FINAL RESULTS -----------------") + logger.info(f'Pixel Acc: {score["Pixel Acc: "]:.3f}') + for cdx, class_name in enumerate(_CLASS_NAMES): + logger.info(f' {class_name}_accuracy {score["Class Accuracy: "][cdx]:.3f}') + logger.info(f'Mean Class Acc: {score["Mean Class Acc: "]:.3f}') + logger.info(f'Freq Weighted IoU: {score["Freq Weighted IoU: "]:.3f}') + logger.info(f'Mean IoU: {score["Mean IoU: "]:0.3f}') + + # Save confusion matrix: + confusion = score["confusion_matrix"] + np.savetxt(path.join(log_dir, "confusion.csv"), confusion, delimiter=" ") + + +if __name__ == "__main__": + fire.Fire(test) diff --git a/experiments/interpretation/dutchf3_section/local/train.py b/experiments/interpretation/dutchf3_section/local/train.py new file mode 100644 index 00000000..69b5f7d4 --- /dev/null +++ b/experiments/interpretation/dutchf3_section/local/train.py @@ -0,0 +1,288 @@ +# Copyright (c) Microsoft Corporation. +# # Licensed under the MIT License. 
+# /* spell-checker: disable */
+
+import logging
+import logging.config
+from os import path
+
+import fire
+import numpy as np
+import torch
+from albumentations import Compose, HorizontalFlip, Normalize
+
+from deepseismic_interpretation.dutchf3.data import decode_segmap, get_section_loader
+from cv_lib.utils import load_log_configuration
+from cv_lib.event_handlers import (
+    SnapshotHandler,
+    logging_handlers,
+    tensorboard_handlers,
+)
+from cv_lib.event_handlers.logging_handlers import Evaluator
+from cv_lib.event_handlers.tensorboard_handlers import (
+    create_image_writer,
+    create_summary_writer,
+)
+from cv_lib.segmentation import models, extract_metric_from
+from cv_lib.segmentation.dutchf3.engine import (
+    create_supervised_evaluator,
+    create_supervised_trainer,
+)
+from cv_lib.segmentation.metrics import (
+    pixelwise_accuracy,
+    class_accuracy,
+    mean_class_accuracy,
+    class_iou,
+    mean_iou,
+)
+from cv_lib.segmentation.dutchf3.utils import (
+    current_datetime,
+    generate_path,
+    git_branch,
+    git_hash,
+    np_to_tb,
+)
+from default import _C as config
+from default import update_config
+from ignite.contrib.handlers import CosineAnnealingScheduler
+from ignite.engine import Events
+from ignite.utils import convert_tensor
+from ignite.metrics import Loss
+from toolz import compose
+from torch.utils import data
+from toolz import take
+
+
+def prepare_batch(batch, device="cuda", non_blocking=False):
+    x, y = batch
+    return (
+        convert_tensor(x, device=device, non_blocking=non_blocking),
+        convert_tensor(y, device=device, non_blocking=non_blocking),
+    )
+
+
+def run(*options, cfg=None, debug=False):
+    """Run training and validation of model
+
+    Notes:
+        Options can be passed in via the options argument and loaded from the cfg file
+        Options from default.py will be overridden by options loaded from the cfg file
+        Options passed in via the options argument will override options loaded from the cfg file
+
+    Args:
+        *options (str, int, optional): Options used to override what is loaded from the
+                                      config. To see what options are available consult
+                                      default.py
+        cfg (str, optional): Location of config file to load. Defaults to None.
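+        debug (bool): Places scripts in debug/test mode and only executes a few iterations.
+
+    Example (a hypothetical debug run, mirroring the invocation pattern of the
+    sibling experiment scripts):
+        python train.py TRAIN.END_EPOCH 1 TRAIN.SNAPSHOTS 1 --cfg "configs/section_deconvnet_skip.yaml" --debug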
+ """ + + update_config(config, options=options, config_file=cfg) + + # Start logging + load_log_configuration(config.LOG_CONFIG) + logger = logging.getLogger(__name__) + logger.debug(config.WORKERS) + scheduler_step = config.TRAIN.END_EPOCH // config.TRAIN.SNAPSHOTS + torch.backends.cudnn.benchmark = config.CUDNN.BENCHMARK + + torch.manual_seed(config.SEED) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(config.SEED) + np.random.seed(seed=config.SEED) + + # Setup Augmentations + basic_aug = Compose([Normalize(mean=(config.TRAIN.MEAN,), std=(config.TRAIN.STD,), max_pixel_value=1)]) + if config.TRAIN.AUGMENTATION: + train_aug = Compose([basic_aug, HorizontalFlip(p=0.5)]) + val_aug = basic_aug + else: + train_aug = val_aug = basic_aug + + TrainLoader = get_section_loader(config) + + train_set = TrainLoader(data_dir=config.DATASET.ROOT, split="train", is_transform=True, augmentations=train_aug,) + + val_set = TrainLoader(data_dir=config.DATASET.ROOT, split="val", is_transform=True, augmentations=val_aug,) + + class CustomSampler(torch.utils.data.Sampler): + def __init__(self, data_source): + self.data_source = data_source + + def __iter__(self): + char = ["i" if np.random.randint(2) == 1 else "x"] + self.indices = [idx for (idx, name) in enumerate(self.data_source) if char[0] in name] + return (self.indices[i] for i in torch.randperm(len(self.indices))) + + def __len__(self): + return len(self.data_source) + + n_classes = train_set.n_classes + + val_list = val_set.sections + train_list = val_set.sections + + train_loader = data.DataLoader( + train_set, + batch_size=config.TRAIN.BATCH_SIZE_PER_GPU, + sampler=CustomSampler(train_list), + num_workers=config.WORKERS, + shuffle=False, + ) + + val_loader = data.DataLoader( + val_set, + batch_size=config.VALIDATION.BATCH_SIZE_PER_GPU, + sampler=CustomSampler(val_list), + num_workers=config.WORKERS, + ) + + model = getattr(models, config.MODEL.NAME).get_seg_model(config) + + device = "cpu" + if torch.cuda.is_available(): + device = "cuda" + model = model.to(device) # Send to GPU + + optimizer = torch.optim.SGD( + model.parameters(), + lr=config.TRAIN.MAX_LR, + momentum=config.TRAIN.MOMENTUM, + weight_decay=config.TRAIN.WEIGHT_DECAY, + ) + + try: + output_dir = generate_path(config.OUTPUT_DIR, git_branch(), git_hash(), config.MODEL.NAME, current_datetime(),) + except TypeError: + output_dir = generate_path(config.OUTPUT_DIR, config.MODEL.NAME, current_datetime(),) + + summary_writer = create_summary_writer(log_dir=path.join(output_dir, config.LOG_DIR)) + + snapshot_duration = scheduler_step * len(train_loader) + scheduler = CosineAnnealingScheduler(optimizer, "lr", config.TRAIN.MAX_LR, config.TRAIN.MIN_LR, snapshot_duration) + + # weights are inversely proportional to the frequency of the classes in + # the training set + class_weights = torch.tensor(config.DATASET.CLASS_WEIGHTS, device=device, requires_grad=False) + + criterion = torch.nn.CrossEntropyLoss(weight=class_weights, ignore_index=255, reduction="mean") + + trainer = create_supervised_trainer(model, optimizer, criterion, prepare_batch, device=device) + + trainer.add_event_handler(Events.ITERATION_STARTED, scheduler) + + trainer.add_event_handler( + Events.ITERATION_COMPLETED, logging_handlers.log_training_output(log_interval=config.PRINT_FREQ), + ) + + trainer.add_event_handler(Events.EPOCH_STARTED, logging_handlers.log_lr(optimizer)) + + trainer.add_event_handler( + Events.EPOCH_STARTED, tensorboard_handlers.log_lr(summary_writer, optimizer, "epoch"), + ) + + 
trainer.add_event_handler( + Events.ITERATION_COMPLETED, tensorboard_handlers.log_training_output(summary_writer), + ) + + def _select_pred_and_mask(model_out_dict): + return (model_out_dict["y_pred"].squeeze(), model_out_dict["mask"].squeeze()) + + evaluator = create_supervised_evaluator( + model, + prepare_batch, + metrics={ + "nll": Loss(criterion, output_transform=_select_pred_and_mask, device=device), + "pixacc": pixelwise_accuracy(n_classes, output_transform=_select_pred_and_mask, device=device), + "cacc": class_accuracy(n_classes, output_transform=_select_pred_and_mask, device=device), + "mca": mean_class_accuracy(n_classes, output_transform=_select_pred_and_mask, device=device), + "ciou": class_iou(n_classes, output_transform=_select_pred_and_mask, device=device), + "mIoU": mean_iou(n_classes, output_transform=_select_pred_and_mask, device=device), + }, + device=device, + ) + + if debug: + logger.info("Running Validation in Debug/Test mode") + val_loader = take(3, val_loader) + trainer.add_event_handler(Events.EPOCH_COMPLETED, Evaluator(evaluator, val_loader)) + + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + logging_handlers.log_metrics( + "Validation results", + metrics_dict={ + "nll": "Avg loss :", + "pixacc": "Pixelwise Accuracy :", + "mca": "Avg Class Accuracy :", + "mIoU": "Avg Class IoU :", + }, + ), + ) + + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + logging_handlers.log_class_metrics( + "Per class validation results", metrics_dict={"ciou": "Class IoU :", "cacc": "Class Accuracy :"}, + ), + ) + + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + tensorboard_handlers.log_metrics( + summary_writer, + trainer, + "epoch", + metrics_dict={ + "mIoU": "Validation/mIoU", + "nll": "Validation/Loss", + "mca": "Validation/MCA", + "pixacc": "Validation/Pixel_Acc", + }, + ), + ) + + def _select_max(pred_tensor): + return pred_tensor.max(1)[1] + + def _tensor_to_numpy(pred_tensor): + return pred_tensor.squeeze().cpu().numpy() + + transform_func = compose(np_to_tb, decode_segmap(n_classes=n_classes), _tensor_to_numpy) + + transform_pred = compose(transform_func, _select_max) + + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, create_image_writer(summary_writer, "Validation/Image", "image"), + ) + + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + create_image_writer(summary_writer, "Validation/Mask", "mask", transform_func=transform_func), + ) + + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + create_image_writer(summary_writer, "Validation/Pred", "y_pred", transform_func=transform_pred), + ) + + def snapshot_function(): + return (trainer.state.iteration % snapshot_duration) == 0 + + checkpoint_handler = SnapshotHandler( + path.join(output_dir, config.TRAIN.MODEL_DIR), + config.MODEL.NAME, + extract_metric_from("mIoU"), + snapshot_function, + ) + + evaluator.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {"model": model}) + + logger.info("Starting training") + if debug: + logger.info("Running Validation in Debug/Test mode") + train_loader = take(3, train_loader) + trainer.run(train_loader, max_epochs=config.TRAIN.END_EPOCH) + + +if __name__ == "__main__": + fire.Fire(run) diff --git a/experiments/interpretation/penobscot/README.md b/experiments/interpretation/penobscot/README.md new file mode 100644 index 00000000..d870ac1c --- /dev/null +++ b/experiments/interpretation/penobscot/README.md @@ -0,0 +1,27 @@ +# Seismic Interpretation on Penobscot dataset +In this folder are training and testing scripts that work on the 
Penobscot dataset.
+You can run two different models on this dataset:
+* [HRNet](local/configs/hrnet.yaml)
+* [SEResNet](local/configs/seresnet_unet.yaml)
+
+Both models take 2D patches of the dataset as input and provide predictions for those patches. The patches need to be stitched together to form a whole inline or crossline.
+
+To understand the configuration files and the default parameters, refer to this [section in the top level README](../../../README.md#configuration-files).
+
+### Setup
+
+Please set up a conda environment following the instructions in the top-level [README.md](../../../README.md#setting-up-environment) file.
+Also follow instructions for [downloading and preparing](../../../README.md#penobscot) the data.
+
+### Usage
+- [`train.sh`](local/train.sh) - Will train the segmentation model. The default configuration will execute for 300 epochs, which will complete in around 3 days on a V100 GPU. During these 300 epochs, successive snapshots will be taken. By default a cyclic learning rate is applied.
+- [`test.sh`](local/test.sh) - Will test your model against the test portion of the dataset. You will be able to view the performance of the trained model in TensorBoard.
+
+### Monitoring progress with TensorBoard
+- from this directory, run `tensorboard --logdir='output'` (all runtime logging information is
+written to the `output` folder)
+- open a web-browser and go to either vmpublicip:6006 if running remotely or localhost:6006 if running locally
+> **NOTE**: If running remotely, remember that the port must be open and accessible.
+
+More information on TensorBoard can be found [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard#launching_tensorboard).
+
diff --git a/experiments/interpretation/penobscot/local/configs/hrnet.yaml b/experiments/interpretation/penobscot/local/configs/hrnet.yaml
new file mode 100644
index 00000000..4ebf6484
--- /dev/null
+++ b/experiments/interpretation/penobscot/local/configs/hrnet.yaml
@@ -0,0 +1,107 @@
+CUDNN:
+  BENCHMARK: true
+  DETERMINISTIC: false
+  ENABLED: true
+GPUS: (0,)
+OUTPUT_DIR: 'output'
+LOG_DIR: 'log'
+WORKERS: 4
+PRINT_FREQ: 10
+LOG_CONFIG: logging.conf
+SEED: 2019
+
+
+DATASET:
+  NUM_CLASSES: 7
+  ROOT: /mnt/penobscot
+  CLASS_WEIGHTS: [0.02630481, 0.05448931, 0.0811898 , 0.01866496, 0.15868563, 0.0875993 , 0.5730662]
+  INLINE_HEIGHT: 1501
+  INLINE_WIDTH: 481
+
+MODEL:
+  NAME: seg_hrnet
+  IN_CHANNELS: 3
+  PRETRAINED: '/data/hrnet_pretrained/image_classification/hrnetv2_w48_imagenet_pretrained.pth'
+  EXTRA:
+    FINAL_CONV_KERNEL: 1
+    STAGE2:
+      NUM_MODULES: 1
+      NUM_BRANCHES: 2
+      BLOCK: BASIC
+      NUM_BLOCKS:
+      - 4
+      - 4
+      NUM_CHANNELS:
+      - 48
+      - 96
+      FUSE_METHOD: SUM
+    STAGE3:
+      NUM_MODULES: 4
+      NUM_BRANCHES: 3
+      BLOCK: BASIC
+      NUM_BLOCKS:
+      - 4
+      - 4
+      - 4
+      NUM_CHANNELS:
+      - 48
+      - 96
+      - 192
+      FUSE_METHOD: SUM
+    STAGE4:
+      NUM_MODULES: 3
+      NUM_BRANCHES: 4
+      BLOCK: BASIC
+      NUM_BLOCKS:
+      - 4
+      - 4
+      - 4
+      - 4
+      NUM_CHANNELS:
+      - 48
+      - 96
+      - 192
+      - 384
+      FUSE_METHOD: SUM
+
+TRAIN:
+  COMPLETE_PATCHES_ONLY: True
+  BATCH_SIZE_PER_GPU: 32
+  BEGIN_EPOCH: 0
+  END_EPOCH: 300
+  MIN_LR: 0.0001
+  MAX_LR: 0.02
+  MOMENTUM: 0.9
+  WEIGHT_DECAY: 0.0001
+  SNAPSHOTS: 5
+  AUGMENTATION: True
+  DEPTH: "patch" # Options are none, patch and section
+  STRIDE: 64
+  PATCH_SIZE: 128
+  AUGMENTATIONS:
+    RESIZE:
+      HEIGHT: 256
+      WIDTH: 256
+    PAD:
+      HEIGHT: 256
+      WIDTH: 256
+  MEAN: [-0.0001777, 0.49, -0.0000688] # First value is for images, second for depth and then combination of both
+  STD: [0.14076 , 0.2717, 0.06286]
+  MAX: 1
+ 
MODEL_DIR: "models" + + +VALIDATION: + BATCH_SIZE_PER_GPU: 128 + COMPLETE_PATCHES_ONLY: True + +TEST: + COMPLETE_PATCHES_ONLY: False + MODEL_PATH: "/data/home/mat/repos/DeepSeismic/experiments/segmentation/penobscot/local/output/penobscot/437970c875226e7e39c8109c0de8d21c5e5d6e3b/seg_hrnet/Sep25_144942/models/seg_hrnet_running_model_28.pth" + AUGMENTATIONS: + RESIZE: + HEIGHT: 256 + WIDTH: 256 + PAD: + HEIGHT: 256 + WIDTH: 256 diff --git a/experiments/interpretation/penobscot/local/configs/seresnet_unet.yaml b/experiments/interpretation/penobscot/local/configs/seresnet_unet.yaml new file mode 100644 index 00000000..29c61936 --- /dev/null +++ b/experiments/interpretation/penobscot/local/configs/seresnet_unet.yaml @@ -0,0 +1,64 @@ +CUDNN: + BENCHMARK: true + DETERMINISTIC: false + ENABLED: true +GPUS: (0,) +OUTPUT_DIR: 'output' +LOG_DIR: 'log' +WORKERS: 4 +PRINT_FREQ: 10 +LOG_CONFIG: logging.conf +SEED: 2019 + + +DATASET: + NUM_CLASSES: 7 + ROOT: /mnt/penobscot + CLASS_WEIGHTS: [0.02630481, 0.05448931, 0.0811898 , 0.01866496, 0.15868563, 0.0875993 , 0.5730662] + INLINE_HEIGHT: 1501 + INLINE_WIDTH: 481 +MODEL: + NAME: resnet_unet + IN_CHANNELS: 3 + +TRAIN: + COMPLETE_PATCHES_ONLY: True + BATCH_SIZE_PER_GPU: 16 + BEGIN_EPOCH: 0 + END_EPOCH: 300 + MIN_LR: 0.0001 + MAX_LR: 0.006 + MOMENTUM: 0.9 + WEIGHT_DECAY: 0.0001 + SNAPSHOTS: 5 + AUGMENTATION: True + DEPTH: "patch" # Options are none, patch and section + STRIDE: 64 + PATCH_SIZE: 128 + AUGMENTATIONS: + RESIZE: + HEIGHT: 256 + WIDTH: 256 + PAD: + HEIGHT: 256 + WIDTH: 256 + MEAN: [-0.0001777, 0.49, -0.0000688] # First value is for images, second for depth and then combination of both + STD: [0.14076 , 0.2717, 0.06286] + MAX: 1 + MODEL_DIR: "models" + + +VALIDATION: + BATCH_SIZE_PER_GPU: 32 + COMPLETE_PATCHES_ONLY: True + +TEST: + COMPLETE_PATCHES_ONLY: False + MODEL_PATH: "/data/home/vapaunic/repos/DeepSeismic/experiments/interpretation/penobscot/local/output/vapaunic/metrics/4120aa99152b6e4f92f8134b783ac63c8131e1ed/resnet_unet/Nov05_105704/models/resnet_unet_running_model_1.pth" + AUGMENTATIONS: + RESIZE: + HEIGHT: 256 + WIDTH: 256 + PAD: + HEIGHT: 256 + WIDTH: 256 diff --git a/experiments/interpretation/penobscot/local/default.py b/experiments/interpretation/penobscot/local/default.py new file mode 100644 index 00000000..fa8e540e --- /dev/null +++ b/experiments/interpretation/penobscot/local/default.py @@ -0,0 +1,121 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
+# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from yacs.config import CfgNode as CN + +_C = CN() + +_C.OUTPUT_DIR = "output" # This will be the base directory for all output, such as logs and saved models + +_C.LOG_DIR = "" # This will be a subdirectory inside OUTPUT_DIR +_C.GPUS = (0,) +_C.WORKERS = 4 +_C.PRINT_FREQ = 20 +_C.AUTO_RESUME = False +_C.PIN_MEMORY = True +_C.LOG_CONFIG = "logging.conf" +_C.SEED = 42 + +# size of voxel cube: WINDOW_SIZE x WINDOW_SIZE x WINDOW_SIZE; used for 3D models only +_C.WINDOW_SIZE = 65 + +# Cudnn related params +_C.CUDNN = CN() +_C.CUDNN.BENCHMARK = True +_C.CUDNN.DETERMINISTIC = False +_C.CUDNN.ENABLED = True + +# DATASET related params +_C.DATASET = CN() +_C.DATASET.ROOT = "" +_C.DATASET.NUM_CLASSES = 7 +_C.DATASET.CLASS_WEIGHTS = [ + 0.02630481, + 0.05448931, + 0.0811898, + 0.01866496, + 0.15868563, + 0.0875993, + 0.5730662, +] +_C.DATASET.INLINE_HEIGHT = 1501 +_C.DATASET.INLINE_WIDTH = 481 + +# common params for NETWORK +_C.MODEL = CN() +_C.MODEL.NAME = "resnet_unet" +_C.MODEL.IN_CHANNELS = 1 +_C.MODEL.PRETRAINED = "" +_C.MODEL.EXTRA = CN(new_allowed=True) + +# training +_C.TRAIN = CN() +_C.TRAIN.COMPLETE_PATCHES_ONLY = True +_C.TRAIN.MIN_LR = 0.001 +_C.TRAIN.MAX_LR = 0.01 +_C.TRAIN.MOMENTUM = 0.9 +_C.TRAIN.BEGIN_EPOCH = 0 +_C.TRAIN.END_EPOCH = 300 +_C.TRAIN.BATCH_SIZE_PER_GPU = 32 +_C.TRAIN.WEIGHT_DECAY = 0.0001 +_C.TRAIN.SNAPSHOTS = 5 +_C.TRAIN.MODEL_DIR = "models" # This will be a subdirectory inside OUTPUT_DIR +_C.TRAIN.AUGMENTATION = True +_C.TRAIN.STRIDE = 64 +_C.TRAIN.PATCH_SIZE = 128 +_C.TRAIN.MEAN = [-0.0001777, 0.49, -0.0000688] # 0.0009996710808862074 +_C.TRAIN.STD = [0.14076, 0.2717, 0.06286] # 0.20976548783479299 +_C.TRAIN.MAX = 1 +_C.TRAIN.DEPTH = "patch" # Options are none, patch and section +# None adds no depth information and the num of channels remains at 1 +# Patch adds depth per patch so is simply the height of that patch from 0 to 1, channels=3 +# Section adds depth per section so contains depth information for the whole section, channels=3 +_C.TRAIN.AUGMENTATIONS = CN() +_C.TRAIN.AUGMENTATIONS.RESIZE = CN() +_C.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT = 256 +_C.TRAIN.AUGMENTATIONS.RESIZE.WIDTH = 256 +_C.TRAIN.AUGMENTATIONS.PAD = CN() +_C.TRAIN.AUGMENTATIONS.PAD.HEIGHT = 256 +_C.TRAIN.AUGMENTATIONS.PAD.WIDTH = 256 + +# validation +_C.VALIDATION = CN() +_C.VALIDATION.BATCH_SIZE_PER_GPU = 32 +_C.VALIDATION.COMPLETE_PATCHES_ONLY = True + +# TEST +_C.TEST = CN() +_C.TEST.MODEL_PATH = "" +_C.TEST.COMPLETE_PATCHES_ONLY = True +_C.TEST.AUGMENTATIONS = CN() +_C.TEST.AUGMENTATIONS.RESIZE = CN() +_C.TEST.AUGMENTATIONS.RESIZE.HEIGHT = 256 +_C.TEST.AUGMENTATIONS.RESIZE.WIDTH = 256 +_C.TEST.AUGMENTATIONS.PAD = CN() +_C.TEST.AUGMENTATIONS.PAD.HEIGHT = 256 +_C.TEST.AUGMENTATIONS.PAD.WIDTH = 256 + + +def update_config(cfg, options=None, config_file=None): + cfg.defrost() + + if config_file: + cfg.merge_from_file(config_file) + + if options: + cfg.merge_from_list(options) + + cfg.freeze() + + +if __name__ == "__main__": + import sys + + with open(sys.argv[1], "w") as f: + print(_C, file=f) diff --git a/experiments/interpretation/penobscot/local/logging.conf b/experiments/interpretation/penobscot/local/logging.conf new file mode 100644 index 00000000..56334fc4 --- /dev/null +++ b/experiments/interpretation/penobscot/local/logging.conf @@ -0,0 +1,34 @@ +[loggers] +keys=root,__main__,event_handlers + 
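+# All loggers below share the single console handler, so log output goes to
+# stdout only; persisting logs to a file would require adding a FileHandler
+# in the sections that follow (not part of this configuration).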
+[handlers] +keys=consoleHandler + +[formatters] +keys=simpleFormatter + +[logger_root] +level=INFO +handlers=consoleHandler + +[logger___main__] +level=INFO +handlers=consoleHandler +qualname=__main__ +propagate=0 + +[logger_event_handlers] +level=INFO +handlers=consoleHandler +qualname=event_handlers +propagate=0 + +[handler_consoleHandler] +class=StreamHandler +level=INFO +formatter=simpleFormatter +args=(sys.stdout,) + +[formatter_simpleFormatter] +format=%(asctime)s - %(name)s - %(levelname)s - %(message)s + diff --git a/experiments/interpretation/penobscot/local/test.py b/experiments/interpretation/penobscot/local/test.py new file mode 100644 index 00000000..b8928c1e --- /dev/null +++ b/experiments/interpretation/penobscot/local/test.py @@ -0,0 +1,309 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# +# To Test: +# python test.py TRAIN.END_EPOCH 1 TRAIN.SNAPSHOTS 1 --cfg "configs/hrnet.yaml" --debug +# +# /* spell-checker: disable */ +"""Train models on Penobscot dataset + +Test models using PyTorch + +Time to run on single V100: 30 minutes +""" + + +import logging +import logging.config +from itertools import chain +from os import path + +import cv2 +import fire +import numpy as np +import torch +import torchvision +from albumentations import Compose, Normalize, PadIfNeeded, Resize +from cv_lib.utils import load_log_configuration +from cv_lib.event_handlers import logging_handlers, tensorboard_handlers +from cv_lib.event_handlers.tensorboard_handlers import ( + create_image_writer, + create_summary_writer, +) +from cv_lib.segmentation import models +from cv_lib.segmentation.metrics import ( + pixelwise_accuracy, + class_accuracy, + mean_class_accuracy, + class_iou, + mean_iou, +) +from cv_lib.segmentation.dutchf3.utils import ( + current_datetime, + generate_path, + git_branch, + git_hash, + np_to_tb, +) +from cv_lib.segmentation.penobscot.engine import create_supervised_evaluator +from deepseismic_interpretation.dutchf3.data import decode_segmap +from deepseismic_interpretation.penobscot.data import get_patch_dataset +from deepseismic_interpretation.penobscot.metrics import InlineMeanIoU +from default import _C as config +from default import update_config +from ignite.engine import Events +from ignite.metrics import Loss +from ignite.utils import convert_tensor +from toolz import compose, tail, take +from toolz.sandbox.core import unzip +from torch.utils import data + + +def _prepare_batch(batch, device=None, non_blocking=False): + x, y, ids, patch_locations = batch + return ( + convert_tensor(x, device=device, non_blocking=non_blocking), + convert_tensor(y, device=device, non_blocking=non_blocking), + ids, + patch_locations, + ) + + +def _padding_from(config): + padding_height = config.TEST.AUGMENTATIONS.PAD.HEIGHT - config.TEST.AUGMENTATIONS.RESIZE.HEIGHT + padding_width = config.TEST.AUGMENTATIONS.PAD.WIDTH - config.TEST.AUGMENTATIONS.RESIZE.WIDTH + assert padding_height == padding_width, "The padding for the height and width need to be the same" + return int(padding_height) + + +def _scale_from(config): + scale_height = config.TEST.AUGMENTATIONS.PAD.HEIGHT / config.TRAIN.PATCH_SIZE + scale_width = config.TEST.AUGMENTATIONS.PAD.WIDTH / config.TRAIN.PATCH_SIZE + assert ( + config.TEST.AUGMENTATIONS.PAD.HEIGHT % config.TRAIN.PATCH_SIZE == 0 + ), "The scaling between the patch height and resized height must be whole number" + assert ( + config.TEST.AUGMENTATIONS.PAD.WIDTH % config.TRAIN.PATCH_SIZE == 0 + ), "The scaling between 
the patch width and resized height must be whole number" + assert scale_height == scale_width, "The scaling for the height and width must be the same" + return int(scale_height) + + +_SEG_COLOURS = np.asarray( + [[241, 238, 246], [208, 209, 230], [166, 189, 219], [116, 169, 207], [54, 144, 192], [5, 112, 176], [3, 78, 123],] +) + + +def _log_tensor_to_tensorboard(images_tensor, identifier, summary_writer, evaluator): + image_grid = torchvision.utils.make_grid(images_tensor, normalize=False, scale_each=False, nrow=2) + summary_writer.add_image(identifier, image_grid, evaluator.state.epoch) + + +_TOP_K = 2 # Number of best performing inlines to log to tensorboard +_BOTTOM_K = 2 # Number of worst performing inlines to log to tensorboard +mask_value = 255 + + +def run(*options, cfg=None, debug=False): + """Run testing of model + + Notes: + Options can be passed in via the options argument and loaded from the cfg file + Options from default.py will be overridden by options loaded from cfg file + Options passed in via options argument will override option loaded from cfg file + + Args: + *options (str,int ,optional): Options used to overide what is loaded from the + config. To see what options are available consult + default.py + cfg (str, optional): Location of config file to load. Defaults to None. + """ + + update_config(config, options=options, config_file=cfg) + + # Start logging + load_log_configuration(config.LOG_CONFIG) + logger = logging.getLogger(__name__) + logger.debug(config.WORKERS) + torch.backends.cudnn.benchmark = config.CUDNN.BENCHMARK + + torch.manual_seed(config.SEED) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(config.SEED) + np.random.seed(seed=config.SEED) + + # Setup Augmentations + test_aug = Compose( + [ + Normalize(mean=(config.TRAIN.MEAN,), std=(config.TRAIN.STD,), max_pixel_value=config.TRAIN.MAX,), + PadIfNeeded( + min_height=config.TRAIN.PATCH_SIZE, + min_width=config.TRAIN.PATCH_SIZE, + border_mode=cv2.BORDER_CONSTANT, + always_apply=True, + mask_value=mask_value, + value=0, + ), + Resize( + config.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT, config.TRAIN.AUGMENTATIONS.RESIZE.WIDTH, always_apply=True, + ), + PadIfNeeded( + min_height=config.TRAIN.AUGMENTATIONS.PAD.HEIGHT, + min_width=config.TRAIN.AUGMENTATIONS.PAD.WIDTH, + border_mode=cv2.BORDER_CONSTANT, + always_apply=True, + mask_value=mask_value, + value=0, + ), + ] + ) + + PenobscotDataset = get_patch_dataset(config) + + test_set = PenobscotDataset( + config.DATASET.ROOT, + config.TRAIN.PATCH_SIZE, + config.TRAIN.STRIDE, + split="test", + transforms=test_aug, + n_channels=config.MODEL.IN_CHANNELS, + complete_patches_only=config.TEST.COMPLETE_PATCHES_ONLY, + ) + + logger.info(str(test_set)) + n_classes = test_set.n_classes + + test_loader = data.DataLoader( + test_set, batch_size=config.VALIDATION.BATCH_SIZE_PER_GPU, num_workers=config.WORKERS, + ) + + model = getattr(models, config.MODEL.NAME).get_seg_model(config) + logger.info(f"Loading model {config.TEST.MODEL_PATH}") + model.load_state_dict(torch.load(config.TEST.MODEL_PATH), strict=False) + + device = "cpu" + if torch.cuda.is_available(): + device = "cuda" + model = model.to(device) # Send to GPU + + try: + output_dir = generate_path(config.OUTPUT_DIR, git_branch(), git_hash(), config.MODEL.NAME, current_datetime(),) + except TypeError: + output_dir = generate_path(config.OUTPUT_DIR, config.MODEL.NAME, current_datetime(),) + + summary_writer = create_summary_writer(log_dir=path.join(output_dir, config.LOG_DIR)) + + # weights are inversely 
proportional to the frequency of the classes in + # the training set + class_weights = torch.tensor(config.DATASET.CLASS_WEIGHTS, device=device, requires_grad=False) + + criterion = torch.nn.CrossEntropyLoss(weight=class_weights, ignore_index=mask_value, reduction="mean") + + def _select_pred_and_mask(model_out_dict): + return (model_out_dict["y_pred"].squeeze(), model_out_dict["mask"].squeeze()) + + def _select_all(model_out_dict): + return ( + model_out_dict["y_pred"].squeeze(), + model_out_dict["mask"].squeeze(), + model_out_dict["ids"], + model_out_dict["patch_locations"], + ) + + inline_mean_iou = InlineMeanIoU( + config.DATASET.INLINE_HEIGHT, + config.DATASET.INLINE_WIDTH, + config.TRAIN.PATCH_SIZE, + n_classes, + padding=_padding_from(config), + scale=_scale_from(config), + output_transform=_select_all, + ) + + evaluator = create_supervised_evaluator( + model, + _prepare_batch, + metrics={ + "nll": Loss(criterion, output_transform=_select_pred_and_mask, device=device), + "inIoU": inline_mean_iou, + "pixa": pixelwise_accuracy(n_classes, output_transform=_select_pred_and_mask, device=device), + "cacc": class_accuracy(n_classes, output_transform=_select_pred_and_mask, device=device), + "mca": mean_class_accuracy(n_classes, output_transform=_select_pred_and_mask, device=device), + "ciou": class_iou(n_classes, output_transform=_select_pred_and_mask, device=device), + "mIoU": mean_iou(n_classes, output_transform=_select_pred_and_mask, device=device), + }, + device=device, + ) + + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + logging_handlers.log_metrics( + "Test results", + metrics_dict={ + "nll": "Avg loss :", + "mIoU": "Avg IoU :", + "pixa": "Pixelwise Accuracy :", + "mca": "Mean Class Accuracy :", + "inIoU": "Mean Inline IoU :", + }, + ), + ) + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + tensorboard_handlers.log_metrics( + summary_writer, + evaluator, + "epoch", + metrics_dict={"mIoU": "Test/IoU", "nll": "Test/Loss", "mca": "Test/MCA", "inIoU": "Test/MeanInlineIoU",}, + ), + ) + + def _select_max(pred_tensor): + return pred_tensor.max(1)[1] + + def _tensor_to_numpy(pred_tensor): + return pred_tensor.squeeze().cpu().numpy() + + transform_func = compose( + np_to_tb, decode_segmap(n_classes=n_classes, label_colours=_SEG_COLOURS), _tensor_to_numpy, + ) + + transform_pred = compose(transform_func, _select_max) + + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, create_image_writer(summary_writer, "Test/Image", "image"), + ) + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, create_image_writer(summary_writer, "Test/Mask", "mask", transform_func=transform_func), + ) + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + create_image_writer(summary_writer, "Test/Pred", "y_pred", transform_func=transform_pred), + ) + + logger.info("Starting training") + if debug: + logger.info("Running in Debug/Test mode") + test_loader = take(3, test_loader) + + evaluator.run(test_loader, max_epochs=1) + + # Log top N and bottom N inlines in terms of IoU to tensorboard + inline_ious = inline_mean_iou.iou_per_inline() + sorted_ious = sorted(inline_ious.items(), key=lambda x: x[1], reverse=True) + topk = ((inline_mean_iou.predictions[key], inline_mean_iou.masks[key]) for key, iou in take(_TOP_K, sorted_ious)) + bottomk = ( + (inline_mean_iou.predictions[key], inline_mean_iou.masks[key]) for key, iou in tail(_BOTTOM_K, sorted_ious) + ) + stack_and_decode = compose(transform_func, torch.stack) + predictions, masks = unzip(chain(topk, bottomk)) + predictions_tensor = 
stack_and_decode(list(predictions)) + masks_tensor = stack_and_decode(list(masks)) + _log_tensor_to_tensorboard(predictions_tensor, "Test/InlinePredictions", summary_writer, evaluator) + _log_tensor_to_tensorboard(masks_tensor, "Test/InlineMasks", summary_writer, evaluator) + + summary_writer.close() + + +if __name__ == "__main__": + fire.Fire(run) diff --git a/experiments/interpretation/penobscot/local/test.sh b/experiments/interpretation/penobscot/local/test.sh new file mode 100755 index 00000000..ad68cf2e --- /dev/null +++ b/experiments/interpretation/penobscot/local/test.sh @@ -0,0 +1,2 @@ +#!/bin/bash +python test.py --cfg "configs/seresnet_unet.yaml" \ No newline at end of file diff --git a/experiments/interpretation/penobscot/local/train.py b/experiments/interpretation/penobscot/local/train.py new file mode 100644 index 00000000..6b86956d --- /dev/null +++ b/experiments/interpretation/penobscot/local/train.py @@ -0,0 +1,316 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# +# To Test: +# python train.py TRAIN.END_EPOCH 1 TRAIN.SNAPSHOTS 1 --cfg "configs/hrnet.yaml" --debug +# +# /* spell-checker: disable */ +"""Train models on Penobscot dataset + +Trains models using PyTorch +Uses a warmup schedule that then goes into a cyclic learning rate + +Time to run on single V100 for 300 epochs: 3.5 days +""" + +import logging +import logging.config +from os import path + +import cv2 +import fire +import numpy as np +import torch +from albumentations import Compose, HorizontalFlip, Normalize, PadIfNeeded, Resize +from ignite.contrib.handlers import CosineAnnealingScheduler +from ignite.engine import Events +from ignite.metrics import Loss +from ignite.utils import convert_tensor +from toolz import compose +from torch.utils import data + +from deepseismic_interpretation.dutchf3.data import decode_segmap +from deepseismic_interpretation.penobscot.data import get_patch_dataset +from cv_lib.utils import load_log_configuration +from cv_lib.event_handlers import ( + SnapshotHandler, + logging_handlers, + tensorboard_handlers, +) +from cv_lib.event_handlers.logging_handlers import Evaluator +from cv_lib.event_handlers.tensorboard_handlers import ( + create_image_writer, + create_summary_writer, +) +from cv_lib.segmentation import models, extract_metric_from +from cv_lib.segmentation.penobscot.engine import ( + create_supervised_evaluator, + create_supervised_trainer, +) +from cv_lib.segmentation.metrics import ( + pixelwise_accuracy, + class_accuracy, + mean_class_accuracy, + class_iou, + mean_iou, +) +from cv_lib.segmentation.dutchf3.utils import ( + current_datetime, + generate_path, + git_branch, + git_hash, + np_to_tb, +) + +from default import _C as config +from default import update_config +from toolz import take + + +mask_value = 255 +_SEG_COLOURS = np.asarray( + [[241, 238, 246], [208, 209, 230], [166, 189, 219], [116, 169, 207], [54, 144, 192], [5, 112, 176], [3, 78, 123],] +) + + +def _prepare_batch(batch, device=None, non_blocking=False): + x, y, ids, patch_locations = batch + return ( + convert_tensor(x, device=device, non_blocking=non_blocking), + convert_tensor(y, device=device, non_blocking=non_blocking), + ids, + patch_locations, + ) + + +def run(*options, cfg=None, debug=False): + """Run training and validation of model + + Notes: + Options can be passed in via the options argument and loaded from the cfg file + Options loaded from default.py will be overridden by those loaded from cfg file + Options passed in via options argument 
will override those loaded from the cfg file
+
+    Args:
+        *options (str, int, optional): Options used to override what is loaded from the
+                                       config. To see what options are available consult
+                                       default.py
+        cfg (str, optional): Location of config file to load. Defaults to None.
+        debug (bool): Places scripts in debug/test mode and only executes a few iterations
+    """
+
+    update_config(config, options=options, config_file=cfg)
+
+    # Start logging
+    load_log_configuration(config.LOG_CONFIG)
+    logger = logging.getLogger(__name__)
+    logger.debug(config.WORKERS)
+    scheduler_step = config.TRAIN.END_EPOCH // config.TRAIN.SNAPSHOTS
+    torch.backends.cudnn.benchmark = config.CUDNN.BENCHMARK
+
+    torch.manual_seed(config.SEED)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(config.SEED)
+    np.random.seed(seed=config.SEED)
+
+    device = "cpu"
+    if torch.cuda.is_available():
+        device = "cuda"
+
+    # Setup Augmentations
+    basic_aug = Compose(
+        [
+            Normalize(mean=(config.TRAIN.MEAN,), std=(config.TRAIN.STD,), max_pixel_value=config.TRAIN.MAX,),
+            PadIfNeeded(
+                min_height=config.TRAIN.PATCH_SIZE,
+                min_width=config.TRAIN.PATCH_SIZE,
+                border_mode=cv2.BORDER_CONSTANT,
+                always_apply=True,
+                mask_value=mask_value,
+                value=0,
+            ),
+            Resize(
+                config.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT, config.TRAIN.AUGMENTATIONS.RESIZE.WIDTH, always_apply=True,
+            ),
+            PadIfNeeded(
+                min_height=config.TRAIN.AUGMENTATIONS.PAD.HEIGHT,
+                min_width=config.TRAIN.AUGMENTATIONS.PAD.WIDTH,
+                border_mode=cv2.BORDER_CONSTANT,
+                always_apply=True,
+                mask_value=mask_value,
+                value=0,
+            ),
+        ]
+    )
+    if config.TRAIN.AUGMENTATION:
+        train_aug = Compose([basic_aug, HorizontalFlip(p=0.5)])
+        val_aug = basic_aug
+    else:
+        train_aug = val_aug = basic_aug
+
+    PenobscotDataset = get_patch_dataset(config)
+
+    train_set = PenobscotDataset(
+        config.DATASET.ROOT,
+        config.TRAIN.PATCH_SIZE,
+        config.TRAIN.STRIDE,
+        split="train",
+        transforms=train_aug,
+        n_channels=config.MODEL.IN_CHANNELS,
+        complete_patches_only=config.TRAIN.COMPLETE_PATCHES_ONLY,
+    )
+
+    val_set = PenobscotDataset(
+        config.DATASET.ROOT,
+        config.TRAIN.PATCH_SIZE,
+        config.TRAIN.STRIDE,
+        split="val",
+        transforms=val_aug,
+        n_channels=config.MODEL.IN_CHANNELS,
+        complete_patches_only=config.VALIDATION.COMPLETE_PATCHES_ONLY,
+    )
+    logger.info(train_set)
+    logger.info(val_set)
+    n_classes = train_set.n_classes
+
+    train_loader = data.DataLoader(
+        train_set, batch_size=config.TRAIN.BATCH_SIZE_PER_GPU, num_workers=config.WORKERS, shuffle=True,
+    )
+
+    val_loader = data.DataLoader(val_set, batch_size=config.VALIDATION.BATCH_SIZE_PER_GPU, num_workers=config.WORKERS,)
+
+    model = getattr(models, config.MODEL.NAME).get_seg_model(config)
+
+    model = model.to(device)  # Send to GPU
+
+    optimizer = torch.optim.SGD(
+        model.parameters(),
+        lr=config.TRAIN.MAX_LR,
+        momentum=config.TRAIN.MOMENTUM,
+        weight_decay=config.TRAIN.WEIGHT_DECAY,
+    )
+
+    try:
+        output_dir = generate_path(config.OUTPUT_DIR, git_branch(), git_hash(), config.MODEL.NAME, current_datetime(),)
+    except TypeError:
+        output_dir = generate_path(config.OUTPUT_DIR, config.MODEL.NAME, current_datetime(),)
+
+    summary_writer = create_summary_writer(log_dir=path.join(output_dir, config.LOG_DIR))
+    snapshot_duration = scheduler_step * len(train_loader)
+    scheduler = CosineAnnealingScheduler(optimizer, "lr", config.TRAIN.MAX_LR, config.TRAIN.MIN_LR, snapshot_duration)
+
+    # weights are inversely proportional to the frequency of the classes in
+    # the training set
+    class_weights = torch.tensor(config.DATASET.CLASS_WEIGHTS,
device=device, requires_grad=False) + + criterion = torch.nn.CrossEntropyLoss(weight=class_weights, ignore_index=mask_value, reduction="mean") + + trainer = create_supervised_trainer(model, optimizer, criterion, _prepare_batch, device=device) + + trainer.add_event_handler(Events.ITERATION_STARTED, scheduler) + + trainer.add_event_handler( + Events.ITERATION_COMPLETED, logging_handlers.log_training_output(log_interval=config.PRINT_FREQ), + ) + trainer.add_event_handler(Events.EPOCH_STARTED, logging_handlers.log_lr(optimizer)) + trainer.add_event_handler( + Events.EPOCH_STARTED, tensorboard_handlers.log_lr(summary_writer, optimizer, "epoch"), + ) + trainer.add_event_handler( + Events.ITERATION_COMPLETED, tensorboard_handlers.log_training_output(summary_writer), + ) + + def _select_pred_and_mask(model_out_dict): + return (model_out_dict["y_pred"].squeeze(), model_out_dict["mask"].squeeze()) + + evaluator = create_supervised_evaluator( + model, + _prepare_batch, + metrics={ + "pixacc": pixelwise_accuracy(n_classes, output_transform=_select_pred_and_mask), + "nll": Loss(criterion, output_transform=_select_pred_and_mask), + "cacc": class_accuracy(n_classes, output_transform=_select_pred_and_mask), + "mca": mean_class_accuracy(n_classes, output_transform=_select_pred_and_mask), + "ciou": class_iou(n_classes, output_transform=_select_pred_and_mask), + "mIoU": mean_iou(n_classes, output_transform=_select_pred_and_mask), + }, + device=device, + ) + + # Set the validation run to start on the epoch completion of the training run + if debug: + logger.info("Running Validation in Debug/Test mode") + val_loader = take(3, val_loader) + trainer.add_event_handler(Events.EPOCH_COMPLETED, Evaluator(evaluator, val_loader)) + + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + logging_handlers.log_metrics( + "Validation results", + metrics_dict={ + "nll": "Avg loss :", + "pixacc": "Pixelwise Accuracy :", + "mca": "Avg Class Accuracy :", + "mIoU": "Avg Class IoU :", + }, + ), + ) + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + tensorboard_handlers.log_metrics( + summary_writer, + trainer, + "epoch", + metrics_dict={ + "mIoU": "Validation/mIoU", + "nll": "Validation/Loss", + "mca": "Validation/MCA", + "pixacc": "Validation/Pixel_Acc", + }, + ), + ) + + def _select_max(pred_tensor): + return pred_tensor.max(1)[1] + + def _tensor_to_numpy(pred_tensor): + return pred_tensor.squeeze().cpu().numpy() + + transform_func = compose( + np_to_tb, decode_segmap(n_classes=n_classes, label_colours=_SEG_COLOURS), _tensor_to_numpy, + ) + + transform_pred = compose(transform_func, _select_max) + + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, create_image_writer(summary_writer, "Validation/Image", "image"), + ) + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + create_image_writer(summary_writer, "Validation/Mask", "mask", transform_func=transform_func), + ) + evaluator.add_event_handler( + Events.EPOCH_COMPLETED, + create_image_writer(summary_writer, "Validation/Pred", "y_pred", transform_func=transform_pred), + ) + + def snapshot_function(): + return (trainer.state.iteration % snapshot_duration) == 0 + + checkpoint_handler = SnapshotHandler( + path.join(output_dir, config.TRAIN.MODEL_DIR), + config.MODEL.NAME, + extract_metric_from("mIoU"), + snapshot_function, + ) + evaluator.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {"model": model}) + + logger.info("Starting training") + if debug: + logger.info("Running Training in Debug/Test mode") + train_loader = take(3, train_loader) 
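+    # Kick off the training loop; when --debug is passed only the 3 batches kept above are used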
+    trainer.run(train_loader, max_epochs=config.TRAIN.END_EPOCH)
+
+
+if __name__ == "__main__":
+    fire.Fire(run)
diff --git a/experiments/interpretation/penobscot/local/train.sh b/experiments/interpretation/penobscot/local/train.sh
new file mode 100755
index 00000000..eb885b98
--- /dev/null
+++ b/experiments/interpretation/penobscot/local/train.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+python train.py --cfg "configs/seresnet_unet.yaml"
\ No newline at end of file
diff --git a/interpretation/deepseismic_interpretation/__init__.py b/interpretation/deepseismic_interpretation/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/interpretation/deepseismic_interpretation/azureml_tools/__init__.py b/interpretation/deepseismic_interpretation/azureml_tools/__init__.py
new file mode 100644
index 00000000..962a4ec2
--- /dev/null
+++ b/interpretation/deepseismic_interpretation/azureml_tools/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+from deepseismic_interpretation.azureml_tools.workspace import workspace_for_user
+from deepseismic_interpretation.azureml_tools.experiment import PyTorchExperiment
diff --git a/interpretation/deepseismic_interpretation/azureml_tools/config.py b/interpretation/deepseismic_interpretation/azureml_tools/config.py
new file mode 100644
index 00000000..afcb7fdb
--- /dev/null
+++ b/interpretation/deepseismic_interpretation/azureml_tools/config.py
@@ -0,0 +1,68 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+
+import ast
+import logging
+
+from dotenv import dotenv_values, find_dotenv, set_key
+
+_DEFAULTS = {
+    "CLUSTER_NAME": "gpucluster24rv3",
+    "CLUSTER_VM_SIZE": "Standard_NC24rs_v3",
+    "CLUSTER_MIN_NODES": 0,
+    "CLUSTER_MAX_NODES": 2,
+    "WORKSPACE": "workspace",
+    "RESOURCE_GROUP": "amlccrg",
+    "REGION": "eastus",
+    "DATASTORE_NAME": "datastore",
+    "CONTAINER_NAME": "container",
+    "ACCOUNT_NAME": "premiumstorage",
+    "SUBSCRIPTION_ID": None,
+}
+
+
+def load_config(dot_env_path=find_dotenv(raise_error_if_not_found=True)):
+    """Load the variables from the .env file
+
+    Args:
+        dot_env_path (str): location of the .env file to read
+
+    Returns:
+        dict: the .env variables
+    """
+    logger = logging.getLogger(__name__)
+    logger.info(f"Found config in {dot_env_path}")
+    return dotenv_values(dot_env_path)
+
+
+def _convert(value):
+    try:
+        return ast.literal_eval(value)
+    except (ValueError, SyntaxError):
+        return value
+
+
+class AzureMLConfig:
+    """Creates AzureMLConfig object
+
+    Stores all the configuration options and syncs them with the .env file
+    """
+
+    _reserved = ("_dot_env_path",)
+
+    def __init__(self):
+        self._dot_env_path = find_dotenv(raise_error_if_not_found=True)
+
+        for k, v in load_config(dot_env_path=self._dot_env_path).items():
+            self.__dict__[k] = _convert(v)
+
+        for k, v in _DEFAULTS.items():
+            if k not in self.__dict__:
+                setattr(self, k, v)
+
+    def __setattr__(self, name, value):
+        if name not in self._reserved:
+            if not isinstance(value, str):
+                value = str(value)
+            set_key(self._dot_env_path, name, value)
+        self.__dict__[name] = value
+
+
+experiment_config = AzureMLConfig()
diff --git a/interpretation/deepseismic_interpretation/azureml_tools/experiment.py b/interpretation/deepseismic_interpretation/azureml_tools/experiment.py
new file mode 100644
index 00000000..e2823c27
--- /dev/null
+++ b/interpretation/deepseismic_interpretation/azureml_tools/experiment.py
@@ -0,0 +1,283 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
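+# A minimal usage sketch for this module; the experiment name, paths and config
+# file below are illustrative placeholders, not fixtures of this repo:
+#
+#   from deepseismic_interpretation.azureml_tools import PyTorchExperiment
+#
+#   experiment = PyTorchExperiment("penobscot-remote")
+#   run = experiment.submit(
+#       project_folder="experiments/interpretation/penobscot/local",
+#       entry_script="train.py",
+#       script_params={"--cfg": "configs/hrnet.yaml"},
+#       node_count=1,
+#   )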
+ + +import logging +import logging.config +import os + +import azureml.core +from azure.common.credentials import get_cli_profile +from azureml.core import Datastore, Environment +from azureml.core.compute import AmlCompute, ComputeTarget +from azureml.core.compute_target import ComputeTargetException +from azureml.core.conda_dependencies import CondaDependencies +from azureml.core.runconfig import EnvironmentDefinition +from azureml.train.dnn import Gloo, Nccl, PyTorch +from toolz import curry + +from deepseismic_interpretation.azureml_tools import workspace_for_user +from deepseismic_interpretation.azureml_tools.config import experiment_config +from deepseismic_interpretation.azureml_tools.resource_group import create_resource_group +from deepseismic_interpretation.azureml_tools.storage import create_premium_storage +from deepseismic_interpretation.azureml_tools.subscription import select_subscription + +_GPU_IMAGE = "mcr.microsoft.com/azureml/base-gpu:openmpi3.1.2-cuda10.0-cudnn7-ubuntu16.04" + + +def _create_cluster(workspace, cluster_name, vm_size, min_nodes, max_nodes): + """Creates AzureML cluster + + Args: + cluster_name (string): The name you wish to assign the cluster. + vm_size (string): The type of sku to use for your vm. + min_nodes (int): Minimum number of nodes in cluster. + Use 0 if you don't want to incur costs when it isn't being used. + max_nodes (int): Maximum number of nodes in cluster. + + """ + logger = logging.getLogger(__name__) + try: + compute_target = ComputeTarget(workspace=workspace, name=cluster_name) + logger.info("Found existing compute target.") + except ComputeTargetException: + logger.info("Creating a new compute target...") + compute_config = AmlCompute.provisioning_configuration( + vm_size=vm_size, min_nodes=min_nodes, max_nodes=max_nodes + ) + + # create the cluster + compute_target = ComputeTarget.create(workspace, cluster_name, compute_config) + compute_target.wait_for_completion(show_output=True) + + # use get_status() to get a detailed status for the current AmlCompute. + logger.debug(compute_target.get_status().serialize()) + + return compute_target + + +@curry +def _create_estimator( + estimator_class, project_folder, entry_script, compute_target, script_params, node_count, env_def, distributed, +): + logger = logging.getLogger(__name__) + + estimator = estimator_class( + project_folder, + entry_script=entry_script, + compute_target=compute_target, + script_params=script_params, + node_count=node_count, + environment_definition=env_def, + distributed_training=distributed, + ) + + logger.debug(estimator.conda_dependencies.__dict__) + return estimator + + +def _create_datastore( + aml_workspace, datastore_name, container_name, account_name, account_key, create_if_not_exists=True, +): + """Creates datastore + + Args: + datastore_name (string): Name you wish to assign to your datastore. + container_name (string): Name of your container. + account_name (string): Storage account name. + account_key (string): The storage account key. 
+ + Returns: + azureml.core.Datastore + """ + logger = logging.getLogger(__name__) + ds = Datastore.register_azure_blob_container( + workspace=aml_workspace, + datastore_name=datastore_name, + container_name=container_name, + account_name=account_name, + account_key=account_key, + create_if_not_exists=create_if_not_exists, + ) + logger.info(f"Registered existing blob storage: {ds.name}.") + return ds + + +def _check_subscription_id(config): + if config.SUBSCRIPTION_ID is None: + profile = select_subscription() + config.SUBSCRIPTION_ID = profile.get_subscription_id() + return True, f"Selected subscription id is {config.SUBSCRIPTION_ID}" + + +_CHECK_FUNCTIONS = (_check_subscription_id,) + + +class ConfigError(Exception): + pass + + +def _check_config(config): + logger = logging.getLogger(__name__) + check_gen = (f(config) for f in _CHECK_FUNCTIONS) + check_results = list(filter(lambda state_msg: state_msg[0] == False, check_gen)) + if len(check_results) > 0: + error_msgs = "\n".join([msg for state, msg in check_results]) + msg = f"Config failed \n {error_msgs}" + logger.info(msg) + raise ConfigError(msg) + + +class BaseExperiment(object): + def __init__(self, experiment_name, config=experiment_config): + + self._logger = logging.getLogger(__name__) + self._logger.info("SDK version:" + str(azureml.core.VERSION)) + _check_config(config) + + profile = select_subscription(sub_name_or_id=config.SUBSCRIPTION_ID) + profile_credentials, subscription_id, _ = profile.get_login_credentials() + rg = create_resource_group(profile_credentials, subscription_id, config.REGION, config.RESOURCE_GROUP) + prem_str, storage_keys = create_premium_storage( + profile_credentials, subscription_id, config.REGION, config.RESOURCE_GROUP, config.ACCOUNT_NAME, + ) + + self._ws = workspace_for_user( + workspace_name=config.WORKSPACE, + resource_group=config.RESOURCE_GROUP, + subscription_id=config.SUBSCRIPTION_ID, + workspace_region=config.REGION, + ) + self._experiment = azureml.core.Experiment(self._ws, name=experiment_name) + self._cluster = _create_cluster( + self._ws, + cluster_name=config.CLUSTER_NAME, + vm_size=config.CLUSTER_VM_SIZE, + min_nodes=config.CLUSTER_MIN_NODES, + max_nodes=config.CLUSTER_MAX_NODES, + ) + + self._datastore = _create_datastore( + self._ws, + datastore_name=config.DATASTORE_NAME, + container_name=config.CONTAINER_NAME, + account_name=prem_str.name, + account_key=storage_keys["key1"], + ) + + @property + def cluster(self): + return self._cluster + + @property + def datastore(self): + return self._datastore + + +_DISTRIBUTED_DICT = {"nccl": Nccl(), "gloo": Gloo()} + + +def _get_distributed(distributed_string): + if distributed_string is not None: + return _DISTRIBUTED_DICT.get(distributed_string.lower()) + else: + return None + + +def create_environment_from_local(name="amlenv", conda_env_name=None): + """Creates environment from environment + + If no value is passed in to the conda_env_name it will simply select the + currently running environment + + Args: + name (str, optional): name of environment. Defaults to "amlenv". + conda_env_name (str, optional): name of the environment to use. Defaults to None. 
+ + Returns: + azureml.core.Environment + """ + conda_env_name = os.getenv("CONDA_DEFAULT_ENV") if conda_env_name is None else conda_env_name + return Environment.from_existing_conda_environment(name, conda_env_name) + + +def create_environment_from_conda_file(conda_path, name="amlenv"): + """Creates environment from supplied conda file + + Args: + conda_path (str): path to conda environment file + name (str, optional): name of environment. Defaults to "amlenv". + + Returns: + azureml.core.Environment + """ + return Environment.from_existing_conda_specification(name, conda_path) + + +class PyTorchExperiment(BaseExperiment): + """Creates Experiment object that can be used to create clusters and submit experiments + + Returns: + PyTorchExperiment: PyTorchExperiment object + """ + + def _complete_datastore(self, script_params): + def _replace(value): + if isinstance(value, str) and "{datastore}" in value: + data_path = value.replace("{datastore}/", "") + return self.datastore.path(data_path).as_mount() + else: + return value + + return {key: _replace(value) for key, value in script_params.items()} + + def submit( + self, + project_folder, + entry_script, + script_params, + node_count=1, + workers_per_node=1, + distributed=None, + environment=None, + ): + """Submit experiment for remote execution on AzureML clusters. + + Args: + project_folder (string): Path of you source files for the experiment + entry_script (string): The filename of your script to run. Must be found in your project_folder + script_params (dict): Dictionary of script parameters + dependencies_file (string, optional): The location of your environment.yml to use to + create the environment your training script requires. + node_count (int, optional): [description]. + wait_for_completion (bool, optional): Whether to block until experiment is done. Defaults to True. + docker_args (tuple, optional): Docker arguments to pass. Defaults to (). + + Returns: + azureml.core.Run: AzureML Run object + """ + self._logger.debug(script_params) + + transformed_params = self._complete_datastore(script_params) + self._logger.debug("Transformed script params") + self._logger.debug(transformed_params) + + if environment is None: + environment = create_environment_from_local() + + environment.docker.shm_size = "8g" + environment.docker.base_image = _GPU_IMAGE + + estimator = _create_estimator( + PyTorch, + project_folder, + entry_script, + self.cluster, + transformed_params, + node_count, + environment, + _get_distributed(distributed), + ) + + self._logger.debug(estimator.conda_dependencies.__dict__) + return self._experiment.submit(estimator) diff --git a/interpretation/deepseismic_interpretation/azureml_tools/resource_group.py b/interpretation/deepseismic_interpretation/azureml_tools/resource_group.py new file mode 100644 index 00000000..7094c760 --- /dev/null +++ b/interpretation/deepseismic_interpretation/azureml_tools/resource_group.py @@ -0,0 +1,60 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
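+"""Helpers for idempotently creating Azure resource groups.
+
+See the Examples section of create_resource_group below for intended usage.
+"""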
+
+
+from azure.mgmt.resource import ResourceManagementClient
+from azure.common.credentials import get_cli_profile
+import logging
+
+
+def _get_resource_group_client(profile_credentials, subscription_id):
+    return ResourceManagementClient(profile_credentials, subscription_id)
+
+
+def resource_group_exists(resource_group_name, resource_group_client):
+    return resource_group_client.resource_groups.check_existence(resource_group_name)
+
+
+class ResourceGroupException(Exception):
+    pass
+
+
+def create_resource_group(profile_credentials, subscription_id, location, resource_group_name):
+    """Creates resource group if it doesn't exist
+
+    Args:
+        profile_credentials : credentials from Azure login
+        subscription_id (str): subscription you wish to use
+        location (str): location you wish the resource group to be created in
+        resource_group_name (str): the name of the resource group to create
+
+    Raises:
+        ResourceGroupException: Exception if the resource group could not be created
+
+    Returns:
+        ResourceGroup: an Azure resource group object
+
+    Examples:
+        >>> profile = get_cli_profile()
+        >>> profile.set_active_subscription("YOUR-SUBSCRIPTION")
+        >>> cred, subscription_id, _ = profile.get_login_credentials()
+        >>> rg = create_resource_group(cred, subscription_id, "eastus", "testrg2")
+    """
+    logger = logging.getLogger(__name__)
+    resource_group_client = _get_resource_group_client(profile_credentials, subscription_id)
+    if resource_group_exists(resource_group_name, resource_group_client=resource_group_client):
+        logger.debug(f"Found resource group {resource_group_name}")
+        resource_group = resource_group_client.resource_groups.get(resource_group_name)
+    else:
+        logger.debug(f"Creating resource group {resource_group_name} in {location}")
+        resource_group_params = {"location": location}
+        resource_group = resource_group_client.resource_groups.create_or_update(
+            resource_group_name, resource_group_params
+        )
+
+    if "Succeeded" not in resource_group.properties.provisioning_state:
+        raise ResourceGroupException(
+            f"Resource group not created successfully | State {resource_group.properties.provisioning_state}"
+        )
+
+    return resource_group
diff --git a/interpretation/deepseismic_interpretation/azureml_tools/storage.py b/interpretation/deepseismic_interpretation/azureml_tools/storage.py
new file mode 100644
index 00000000..1e4b9844
--- /dev/null
+++ b/interpretation/deepseismic_interpretation/azureml_tools/storage.py
@@ -0,0 +1,63 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
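+"""Helpers for creating the premium block-blob storage account that backs the experiment datastore.
+
+See the Example section of create_premium_storage below for intended usage.
+"""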
+
+
+from azure.mgmt.storage import StorageManagementClient
+from azure.mgmt.storage.models import StorageAccountCreateParameters
+from azure.mgmt.storage.v2019_04_01.models import Kind, Sku, SkuName
+
+from deepseismic_interpretation.azureml_tools.resource_group import create_resource_group
+
+
+class StorageAccountCreateFailure(Exception):
+    pass
+
+
+def create_premium_storage(
+    profile_credentials, subscription_id, location, resource_group_name, storage_name,
+):
+    """Create premium blob storage
+
+    Args:
+        profile_credentials : credentials from Azure login (see example below for details)
+        subscription_id (str): subscription you wish to use
+        location (str): location you wish the storage account to be created in
+        resource_group_name (str): the name of the resource group you want the storage to be created under
+        storage_name (str): the name of the storage account
+
+    Raises:
+        StorageAccountCreateFailure: if the storage account does not provision successfully
+
+    Returns:
+        tuple: the created (or existing) storage account and a dict of its access keys
+
+    Example:
+        >>> from azure.common.credentials import get_cli_profile
+        >>> profile = get_cli_profile()
+        >>> profile.set_active_subscription("YOUR-ACCOUNT")
+        >>> cred, subscription_id, _ = profile.get_login_credentials()
+        >>> storage = create_premium_storage(cred, subscription_id, "eastus", "testrg", "teststr")
+    """
+    storage_client = StorageManagementClient(profile_credentials, subscription_id)
+    create_resource_group(profile_credentials, subscription_id, location, resource_group_name)
+    if not storage_client.storage_accounts.check_name_availability(storage_name).name_available:
+        storage_account = storage_client.storage_accounts.get_properties(resource_group_name, storage_name)
+    else:
+        storage_async_operation = storage_client.storage_accounts.create(
+            resource_group_name,
+            storage_name,
+            StorageAccountCreateParameters(
+                sku=Sku(name=SkuName.premium_lrs), kind=Kind.block_blob_storage, location=location,
+            ),
+        )
+        storage_account = storage_async_operation.result()
+
+    if "Succeeded" not in storage_account.provisioning_state:
+        raise StorageAccountCreateFailure(
+            f"Storage account not created successfully | State {storage_account.provisioning_state}"
+        )
+
+    storage_keys = storage_client.storage_accounts.list_keys(resource_group_name, storage_name)
+    storage_keys = {v.key_name: v.value for v in storage_keys.keys}
+
+    return storage_account, storage_keys
diff --git a/interpretation/deepseismic_interpretation/azureml_tools/subscription.py b/interpretation/deepseismic_interpretation/azureml_tools/subscription.py
new file mode 100644
index 00000000..ce58c269
--- /dev/null
+++ b/interpretation/deepseismic_interpretation/azureml_tools/subscription.py
@@ -0,0 +1,97 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
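+# A short usage sketch for this module; the subscription name is a placeholder:
+#
+#   from deepseismic_interpretation.azureml_tools.subscription import select_subscription
+#
+#   profile = select_subscription(sub_name_or_id="YOUR-SUBSCRIPTION")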
+ +import json +import logging +import os +import subprocess +import sys + +from azure.common.client_factory import get_client_from_cli_profile +from azure.common.credentials import get_cli_profile +from azure.mgmt.resource import SubscriptionClient +from prompt_toolkit import prompt +from tabulate import tabulate +from toolz import pipe + +from knack.util import CLIError + +_GREEN = "\033[0;32m" +_BOLD = "\033[;1m" + + +def _run_az_cli_login(): + process = subprocess.Popen(["az", "login"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + for c in iter(lambda: process.stdout.read(1), b""): + sys.stdout.write(_GREEN + _BOLD + c.decode(sys.stdout.encoding)) + + +def list_subscriptions(profile=None): + """Lists the subscriptions associated with the profile + + If you don't supply a profile it will try to get the profile from your Azure CLI login + + Args: + profile (azure.cli.core._profile.Profile, optional): Profile you wish to use. Defaults to None. + + Returns: + list: list of subscriptions + """ + if profile is None: + profile = subscription_profile() + cred, _, _ = profile.get_login_credentials() + sub_client = SubscriptionClient(cred) + return [ + {"Index": i, "Name": sub.display_name, "id": sub.subscription_id} + for i, sub in enumerate(sub_client.subscriptions.list()) + ] + + +def subscription_profile(): + """Return the Azure CLI profile + + Returns: + azure.cli.core._profile.Profile: Azure profile + """ + logger = logging.getLogger(__name__) + try: + return get_cli_profile() + except CLIError: + logger.info("Not logged in, running az login") + _run_az_cli_login() + return get_cli_profile() + + +def _prompt_sub_id_selection(profile): + sub_list = list_subscriptions(profile=profile) + pipe(sub_list, tabulate, print) + prompt_result = prompt("Please type in index of subscription you want to use: ") + selected_sub = sub_list[int(prompt_result)] + print(f"You selected index {prompt_result} sub id {selected_sub['id']} name {selected_sub['Name']}") + return selected_sub["id"] + + +def select_subscription(profile=None, sub_name_or_id=None): + """Sets active subscription + + If you don't supply a profile it will try to get the profile from your Azure CLI login + If you don't supply a subscription name or id it will list ones from your account and ask you to select one + + Args: + profile (azure.cli.core._profile.Profile, optional): Profile you wish to use. Defaults to None. + sub_name_or_id (str, optional): The subscription name or id to use. Defaults to None. + + Returns: + azure.cli.core._profile.Profile: Azure profile + + Example: + >>> profile = select_subscription() + """ + if profile is None: + profile = subscription_profile() + + if sub_name_or_id is None: + sub_name_or_id = _prompt_sub_id_selection(profile) + + profile.set_active_subscription(sub_name_or_id) + return profile diff --git a/interpretation/deepseismic_interpretation/azureml_tools/workspace.py b/interpretation/deepseismic_interpretation/azureml_tools/workspace.py new file mode 100644 index 00000000..485a7874 --- /dev/null +++ b/interpretation/deepseismic_interpretation/azureml_tools/workspace.py @@ -0,0 +1,96 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
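+# A minimal usage sketch; the values below mirror the defaults in config.py and the
+# subscription id is a placeholder:
+#
+#   ws = workspace_for_user(
+#       "workspace", "amlccrg", subscription_id="<your-subscription-id>", workspace_region="eastus",
+#   )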
+ + +import logging +import os +from pathlib import Path + +import azureml +from azureml.core.authentication import ( + AuthenticationException, + AzureCliAuthentication, + InteractiveLoginAuthentication, + ServicePrincipalAuthentication, +) + +_DEFAULT_AML_PATH = "aml_config/azml_config.json" + + +def _get_auth(): + """Returns authentication to Azure Machine Learning workspace.""" + logger = logging.getLogger(__name__) + if os.environ.get("AML_SP_PASSWORD", None): + logger.debug("Trying to authenticate with Service Principal") + aml_sp_password = os.environ.get("AML_SP_PASSWORD") + aml_sp_tennant_id = os.environ.get("AML_SP_TENNANT_ID") + aml_sp_username = os.environ.get("AML_SP_USERNAME") + auth = ServicePrincipalAuthentication(aml_sp_tennant_id, aml_sp_username, aml_sp_password) + else: + logger.debug("Trying to authenticate with CLI Authentication") + try: + auth = AzureCliAuthentication() + auth.get_authentication_header() + except AuthenticationException: + logger.debug("Trying to authenticate with Interactive login") + auth = InteractiveLoginAuthentication() + + return auth + + +def create_workspace( + workspace_name, resource_group, subscription_id, workspace_region, filename="azml_config.json", +): + """Creates Azure Machine Learning workspace.""" + logger = logging.getLogger(__name__) + auth = _get_auth() + + ws = azureml.core.Workspace.create( + name=workspace_name, + subscription_id=subscription_id, + resource_group=resource_group, + location=workspace_region, + create_resource_group=True, + exist_ok=True, + auth=auth, + ) + + logger.info(ws.get_details()) + ws.write_config(file_name=filename) + return ws + + +def load_workspace(path): + """Loads Azure Machine Learning workspace from a config file.""" + auth = _get_auth() + ws = azureml.core.Workspace.from_config(auth=auth, path=path) + logger = logging.getLogger(__name__) + logger.info( + "\n".join( + [ + "Workspace name: " + str(ws.name), + "Azure region: " + str(ws.location), + "Subscription id: " + str(ws.subscription_id), + "Resource group: " + str(ws.resource_group), + ] + ) + ) + return ws + + +def workspace_for_user( + workspace_name, resource_group, subscription_id, workspace_region, config_path=_DEFAULT_AML_PATH, +): + """Returns Azure Machine Learning workspace.""" + if os.path.isfile(config_path): + return load_workspace(config_path) + else: + path_obj = Path(config_path) + filename = path_obj.name + return create_workspace( + workspace_name, + resource_group, + subscription_id=subscription_id, + workspace_region=workspace_region, + filename=filename, + ) diff --git a/interpretation/deepseismic_interpretation/data.py b/interpretation/deepseismic_interpretation/data.py new file mode 100644 index 00000000..53ec7f9f --- /dev/null +++ b/interpretation/deepseismic_interpretation/data.py @@ -0,0 +1,333 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
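+# A minimal sketch of how the patch dataset below can be constructed; the arrays are
+# random placeholders, real callers pass seismic volumes and label masks of matching shape:
+#
+#   import numpy as np
+#
+#   seismic = np.random.rand(10, 200, 200).astype(np.float32)  # (inlines, height, width)
+#   labels = np.random.randint(0, 7, size=(10, 200, 200))
+#   train_set = InlinePatchDataset(seismic, labels, patch_size=64, stride=32, split="train")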
+ +import itertools +import math +from collections import defaultdict + +import numpy as np +import torch +from PIL import Image +from toolz import compose, curry +from toolz import partition_all +from torch.utils.data import Dataset +from torchvision.datasets.utils import iterable_to_str, verify_str_arg + +_open_to_array = compose(np.array, Image.open) + + +class DataNotSplitException(Exception): + pass + + +def _get_classes_and_counts(mask_list): + class_counts_dict = defaultdict(int) + for mask in mask_list: + for class_label, class_count in zip(*np.unique(mask, return_counts=True)): + class_counts_dict[class_label] += class_count + return list(class_counts_dict.keys()), list(class_counts_dict.values()) + + +def _combine(mask_array): + """Combine classes 2 and 3. Reduce all classes above 3 by one + """ + mask_array[np.logical_or(mask_array == 2, mask_array == 3)] = 2 + for i in filter(lambda x: x > 3, np.unique(mask_array)): + mask_array[mask_array == i] = i - 1 + return mask_array + + +def _combine_classes(mask_array_list): + """Combine classes + + Segmentation implementations using this dataset seem to combine + classes 2 and 3 so we are doing the same here and then relabeling the rest + + Args: + mask_array_list (list): list of mask (numpy.Array) + """ + return [_combine(mask_array.copy()) for mask_array in mask_array_list] + + +def _replicate_channels(image_array, n_channels): + new_image_array = np.zeros((n_channels, image_array.shape[0], image_array.shape[1])) + for i in range(n_channels): + new_image_array[i] = image_array + return new_image_array + + +def _number_patches_in(height_or_width, patch_size, stride, complete_patches_only=True): + strides_in_hw = (height_or_width - patch_size) / stride + if complete_patches_only: + return int(np.floor(strides_in_hw)) + else: + return int(np.ceil(strides_in_hw)) + + +def _is_2D(numpy_array): + return len(numpy_array.shape) == 2 + + +def _is_3D(numpy_array): + return len(numpy_array.shape) == 3 + + +@curry +def _extract_patches(patch_size, stride, complete_patches_only, img_array, mask_array): + height, width = img_array.shape[-2], img_array.shape[-1] + num_h_patches = _number_patches_in(height, patch_size, stride, complete_patches_only=complete_patches_only) + num_w_patches = _number_patches_in(width, patch_size, stride, complete_patches_only=complete_patches_only) + height_iter = range(0, stride * (num_h_patches + 1), stride) + width_iter = range(0, stride * (num_w_patches + 1), stride) + patch_locations = list(itertools.product(height_iter, width_iter)) + + image_patch_generator = _generate_patches_for(img_array, patch_locations, patch_size) + mask_patch_generator = _generate_patches_for(mask_array, patch_locations, patch_size) + return image_patch_generator, mask_patch_generator, patch_locations + + +def _generate_patches_for(numpy_array, patch_locations, patch_size): + if _is_2D(numpy_array): + generate = _generate_patches_from_2D + elif _is_3D(numpy_array): + generate = _generate_patches_from_3D + else: + raise ValueError("Array is not 2D or 3D") + return generate(numpy_array, patch_locations, patch_size) + + +def _generate_patches_from_2D(numpy_array, patch_locations, patch_size): + return (numpy_array[h : h + patch_size, w : w + patch_size].copy() for h, w in patch_locations) + + +def _generate_patches_from_3D(numpy_array, patch_locations, patch_size): + return (numpy_array[:, h : h + patch_size, w : w + patch_size].copy() for h, w in patch_locations) + + +_STATS_FUNCS = {"mean": np.mean, "std": np.std, "max": np.max} + + +def 
_transform_CHW_to_HWC(numpy_array):
+    return np.moveaxis(numpy_array, 0, -1)
+
+
+def _transform_HWC_to_CHW(numpy_array):
+    return np.moveaxis(numpy_array, -1, 0)
+
+
+def _rescale(numpy_array):
+    """Rescale the numpy array by 10000. The maximum absolute value in the data
+    is 32737, so this brings the values roughly into the range [-3.3, 3.3]
+    """
+    return numpy_array / 10000
+
+
+def _split_train_val_test(partition, val_ratio, test_ratio):
+    total_samples = len(partition)
+    val_samples = math.floor(val_ratio * total_samples)
+    test_samples = math.floor(test_ratio * total_samples)
+    train_samples = total_samples - (val_samples + test_samples)
+    train_list = partition[:train_samples]
+    val_list = partition[train_samples : train_samples + val_samples]
+    test_list = partition[train_samples + val_samples : train_samples + val_samples + test_samples]
+    return train_list, val_list, test_list
+
+
+class InlinePatchDataset(Dataset):
+    """Dataset that returns patches from the numpy dataset
+
+    Notes:
+        Loads inlines only and splits into patches
+    """
+
+    _repr_indent = 4
+
+    def __init__(
+        self,
+        data_array,
+        mask_array,
+        patch_size,
+        stride,
+        split="train",
+        transforms=None,
+        max_inlines=None,
+        n_channels=1,
+        complete_patches_only=True,
+        val_ratio=0.1,
+        test_ratio=0.2,
+    ):
+        """Initialise Numpy Dataset
+
+        Args:
+            data_array (numpy.Array): a 3D numpy array that contains the seismic info
+            mask_array (numpy.Array): a 3D numpy array that contains the labels
+            patch_size (int): the size of the patch in pixels
+            stride (int): the stride applied when extracting patches
+            split (str, optional): what split to load, (train, val, test). Defaults to `train`
+            transforms (albumentations.augmentations.transforms, optional): albumentation transforms to apply to patches. Defaults to None
+            max_inlines (int, optional): maximum number of inlines to load. Defaults to None
+            n_channels (int, optional): number of channels that the output should contain. Defaults to 1
+            complete_patches_only (bool, optional): whether to load only complete patches; if False, incomplete edge patches are included as well. Defaults to True
+            val_ratio (float): ratio to use for validation. Defaults to 0.1
+            test_ratio (float): ratio to use for test. Defaults to 0.2
+        """
+
+        super(InlinePatchDataset, self).__init__()
+        self._data_array = data_array
+        self._slice_mask_array = mask_array
+        self._split = split
+        self._max_inlines = max_inlines
+        self._n_channels = n_channels
+        self._complete_patches_only = complete_patches_only
+        self._patch_size = patch_size
+        self._stride = stride
+        self._image_array = []
+        self._mask_array = []
+        self._ids = []
+        self._patch_locations = []
+
+        self.transforms = transforms
+
+        valid_modes = ("train", "test", "val")
+        msg = "Unknown value '{}' for argument split. Valid values are {{{}}}."
+ msg = msg.format(split, iterable_to_str(valid_modes)) + verify_str_arg(split, "split", valid_modes, msg) + + # Set the patch and stride for the patch extractor + _extract_patches_from = _extract_patches(patch_size, stride, self._complete_patches_only) + num_partitions = 5 + indexes = self._data_array.shape[0] + num_elements = math.ceil(indexes / num_partitions) + train_indexes_list = [] + test_indexes_list = [] + val_indexes_list = [] + + for partition in partition_all(num_elements, range(indexes)): # Partition files into N partitions + train_indexes, val_indexes, test_indexes = _split_train_val_test(partition, val_ratio, test_ratio) + train_indexes_list.extend(train_indexes) + test_indexes_list.extend(test_indexes) + val_indexes_list.extend(val_indexes) + + if split == "train": + indexes = train_indexes_list + elif split == "val": + indexes = val_indexes_list + elif split == "test": + indexes = test_indexes_list + + # Extract patches + for index in indexes: + img_array = self._data_array[index] + mask_array = self._slice_mask_array[index] + self._ids.append(index) + image_generator, mask_generator, patch_locations = _extract_patches_from(img_array, mask_array) + self._patch_locations.extend(patch_locations) + + self._image_array.extend(image_generator) + + self._mask_array.extend(mask_generator) + + assert len(self._image_array) == len(self._patch_locations), "The shape is not the same" + + assert len(self._patch_locations) % len(self._ids) == 0, "Something is wrong with the patches" + + self._patches_per_image = int(len(self._patch_locations) / len(self._ids)) + + self._classes, self._class_counts = _get_classes_and_counts(self._mask_array) + + def __len__(self): + return len(self._image_array) + + @property + def n_classes(self): + return len(self._classes) + + @property + def class_proportions(self): + total = np.sum(self._class_counts) + return [(i, w / total) for i, w in zip(self._classes, self._class_counts)] + + def _add_extra_channels(self, image): + if self._n_channels > 1: + image = _replicate_channels(image, self._n_channels) + return image + + def __getitem__(self, index): + image, target, ids, patch_locations = ( + self._image_array[index], + self._mask_array[index], + self._ids[index // self._patches_per_image], + self._patch_locations[index], + ) + + image = self._add_extra_channels(image) + if _is_2D(image): + image = np.expand_dims(image, 0) + + if self.transforms is not None: + image = _transform_CHW_to_HWC(image) + augmented_dict = self.transforms(image=image, mask=target) + image, target = augmented_dict["image"], augmented_dict["mask"] + image = _transform_HWC_to_CHW(image) + + target = np.expand_dims(target, 0) + + return ( + torch.from_numpy(image).float(), + torch.from_numpy(target).long(), + ids, + np.array(patch_locations), + ) + + @property + def statistics(self): + flat_image_array = np.concatenate([i.flatten() for i in self._image_array]) + stats = {stat: statfunc(flat_image_array) for stat, statfunc in _STATS_FUNCS.items()} + return "Mean: {mean} Std: {std} Max: {max}".format(**stats) + + def __repr__(self): + head = "Dataset " + self.__class__.__name__ + body = ["Number of datapoints: {}".format(self.__len__())] + body += self.extra_repr().splitlines() + if hasattr(self, "transforms") and self.transforms is not None: + body += [repr(self.transforms)] + lines = [head] + [" " * self._repr_indent + line for line in body] + return "\n".join(lines) + + def _format_transform_repr(self, transform, head): + lines = transform.__repr__().splitlines() + return 
["{}{}".format(head, lines[0])] + ["{}{}".format(" " * len(head), line) for line in lines[1:]] + + def extra_repr(self): + lines = [ + "Split: {_split}", + "Patch size: {_patch_size}", + "Stride: {_stride}", + "Max inlines: {_max_inlines}", + "Num channels: {_n_channels}", + f"Num classes: {self.n_classes}", + f"Class proportions: {self.class_proportions}", + "Complete patches only: {_complete_patches_only}", + f"Dataset statistics: {self.statistics}", + ] + return "\n".join(lines).format(**self.__dict__) + + +_TRAIN_PATCH_DATASETS = {"none": InlinePatchDataset} + + +def get_patch_dataset(cfg): + """ Return the Dataset class for Numpy Array + + Args: + cfg: yacs config + + Returns: + InlinePatchDataset + """ + assert str(cfg.TRAIN.DEPTH).lower() in [ + "none" + ], f"Depth {cfg.TRAIN.DEPTH} not supported for patch data. \ + Valid values: section, patch, none." + return _TRAIN_PATCH_DATASETS.get(cfg.TRAIN.DEPTH, InlinePatchDataset) diff --git a/interpretation/deepseismic_interpretation/dutchf3/__init__.py b/interpretation/deepseismic_interpretation/dutchf3/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/interpretation/deepseismic_interpretation/dutchf3/data.py b/interpretation/deepseismic_interpretation/dutchf3/data.py new file mode 100644 index 00000000..36d69f21 --- /dev/null +++ b/interpretation/deepseismic_interpretation/dutchf3/data.py @@ -0,0 +1,823 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import itertools +import warnings +import segyio +from os import path +import scipy + +# bugfix for scipy imports +import scipy.misc +import numpy as np +import torch +from toolz import curry +from torch.utils import data + +from deepseismic_interpretation.dutchf3.utils.batch import ( + interpolate_to_fit_data, + parse_labels_in_image, + get_coordinates_for_slice, + get_grid, + augment_flip, + augment_rot_xy, + augment_rot_z, + augment_stretch, + rand_int, + trilinear_interpolation, +) + + +def _train_data_for(data_dir): + return path.join(data_dir, "train", "train_seismic.npy") + + +def _train_labels_for(data_dir): + return path.join(data_dir, "train", "train_labels.npy") + + +def _test1_data_for(data_dir): + return path.join(data_dir, "test_once", "test1_seismic.npy") + + +def _test1_labels_for(data_dir): + return path.join(data_dir, "test_once", "test1_labels.npy") + + +def _test2_data_for(data_dir): + return path.join(data_dir, "test_once", "test2_seismic.npy") + + +def _test2_labels_for(data_dir): + return path.join(data_dir, "test_once", "test2_labels.npy") + + +def readSEGY(filename): + """[summary] + Read the segy file and return the data as a numpy array and a dictionary describing what has been read in. + + Arguments: + filename {str} -- .segy file location. 
+
+    Returns:
+        numpy.ndarray, dict -- 3D segy data as a numpy array and a dictionary with metadata information
+    """
+
+    # TODO: we really need to add logging to this repo
+    print("Loading data cube from", filename, "with:")
+
+    # Read full data cube
+    data = segyio.tools.cube(filename)
+
+    # Put temporal axis first
+    data = np.moveaxis(data, -1, 0)
+
+    # Make data cube fast to access
+    data = np.ascontiguousarray(data, "float32")
+
+    # Read meta data
+    segyfile = segyio.open(filename, "r")
+    print(" Crosslines: ", segyfile.xlines[0], ":", segyfile.xlines[-1])
+    print(" Inlines: ", segyfile.ilines[0], ":", segyfile.ilines[-1])
+    print(" Timeslices: ", "1", ":", data.shape[0])
+
+    # Make dict with cube-info
+    data_info = {}
+    data_info["crossline_start"] = segyfile.xlines[0]
+    data_info["inline_start"] = segyfile.ilines[0]
+    data_info["timeslice_start"] = 1  # Todo: read this from segy
+    data_info["shape"] = data.shape
+    # Read dt and other params needed to create a new cube
+
+    return data, data_info
+
+
+def read_labels(fname, data_info):
+    """
+    Read labels from an image.
+
+    Args:
+        fname: filename of labelling mask (image)
+        data_info: dictionary describing the data
+
+    Returns:
+        list of labels and list of coordinates
+    """
+
+    # Alternative writings for slice-type
+    inline_alias = ["inline", "in-line", "iline", "y"]
+    crossline_alias = ["crossline", "cross-line", "xline", "x"]
+    timeslice_alias = ["timeslice", "time-slice", "t", "z", "depthslice", "depth"]
+
+    label_imgs = []
+    label_coordinates = {}
+
+    # Find image files in folder
+
+    tmp = fname.split("/")[-1].split("_")
+    slice_type = tmp[0].lower()
+    tmp = tmp[1].split(".")
+    slice_no = int(tmp[0])
+
+    if slice_type not in inline_alias + crossline_alias + timeslice_alias:
+        print("File:", fname, "could not be loaded.", "Unknown slice type")
+        return None
+
+    if slice_type in inline_alias:
+        slice_type = "inline"
+    if slice_type in crossline_alias:
+        slice_type = "crossline"
+    if slice_type in timeslice_alias:
+        slice_type = "timeslice"
+
+    # Read file
+    print("Loading labels for", slice_type, slice_no, "with")
+    img = scipy.misc.imread(fname)
+    img = interpolate_to_fit_data(img, slice_type, slice_no, data_info)
+    label_img = parse_labels_in_image(img)
+
+    # Get coordinates for slice
+    coords = get_coordinates_for_slice(slice_type, slice_no, data_info)
+
+    # Loop through labels in label_img and append to label_coordinates
+    for cls in np.unique(label_img):
+        if cls > -1:
+            if str(cls) not in label_coordinates.keys():
+                label_coordinates[str(cls)] = np.array(np.zeros([3, 0]))
+            inds_with_cls = label_img == cls
+            cords_with_cls = coords[:, inds_with_cls.ravel()]
+            label_coordinates[str(cls)] = np.concatenate((label_coordinates[str(cls)], cords_with_cls), 1)
+            print(" ", str(np.sum(inds_with_cls)), "labels for class", str(cls))
+    if len(np.unique(label_img)) == 1:
+        print(" ", 0, "labels", str(cls))
+
+    # Add label_img to output
+    label_imgs.append([label_img, slice_type, slice_no])
+
+    return label_imgs, label_coordinates
+
+
+def get_random_batch(
+    data_cube,
+    label_coordinates,
+    im_size,
+    batch_size,
+    index,
+    random_flip=False,
+    random_stretch=None,
+    random_rot_xy=None,
+    random_rot_z=None,
+):
+    """
+    Returns a batch of augmented samples with center pixels randomly drawn from label_coordinates
+
+    Args:
+        data_cube: 3D numpy array with floating point velocity values
+        label_coordinates: 3D coordinates of the labeled training slice
+        im_size: size of the 3D voxel which we're cutting out around each label_coordinate
+        batch_size: size of the 
batch + index: element index of this element in a batch + random_flip: bool to perform random voxel flip + random_stretch: bool to enable random stretch + random_rot_xy: bool to enable random rotation of the voxel around dim-0 and dim-1 + random_rot_z: bool to enable random rotation around dim-2 + + Returns: + a tuple of batch numpy array array of data with dimension + (batch, 1, data_cube.shape[0], data_cube.shape[1], data_cube.shape[2]) and the associated labels as an array + of size (batch). + """ + + # always generate only one datapoint - batch_size controls class balance + num_batch_size = 1 + + # Make 3 im_size elements + if isinstance(im_size, int): + im_size = [im_size, im_size, im_size] + + # Output arrays + batch = np.zeros([num_batch_size, 1, im_size[0], im_size[1], im_size[2]]) + ret_labels = np.zeros([num_batch_size]) + + class_keys = list(label_coordinates) + n_classes = len(class_keys) + + # We seek to have a balanced batch with equally many samples from each class. + # get total number of samples per class + samples_per_class = batch_size // n_classes + # figure out index relative to zero (not sequentially counting points) + index = index - batch_size * (index // batch_size) + # figure out which class to sample for this datapoint + class_ind = index // samples_per_class + + # Start by getting a grid centered around (0,0,0) + grid = get_grid(im_size) + + # Apply random flip + if random_flip: + grid = augment_flip(grid) + + # Apply random rotations + if random_rot_xy: + grid = augment_rot_xy(grid, random_rot_xy) + if random_rot_z: + grid = augment_rot_z(grid, random_rot_z) + + # Apply random stretch + if random_stretch: + grid = augment_stretch(grid, random_stretch) + + # Pick random location from the label_coordinates for this class: + coords_for_class = label_coordinates[class_keys[class_ind]] + random_index = rand_int(0, coords_for_class.shape[1]) + coord = coords_for_class[:, random_index : random_index + 1] + + # Move grid to be centered around this location + grid += coord + + # Interpolate samples at grid from the data: + sample = trilinear_interpolation(data_cube, grid) + + # Insert in output arrays + ret_labels[0] = class_ind + batch[0, 0, :, :, :] = np.reshape(sample, (im_size[0], im_size[1], im_size[2])) + + return batch, ret_labels + + +class SectionLoader(data.Dataset): + def __init__(self, data_dir, split="train", is_transform=True, augmentations=None): + self.split = split + self.data_dir = data_dir + self.is_transform = is_transform + self.augmentations = augmentations + self.n_classes = 6 + self.sections = list() + + def __len__(self): + return len(self.sections) + + def __getitem__(self, index): + + section_name = self.sections[index] + direction, number = section_name.split(sep="_") + + if direction == "i": + im = self.seismic[int(number), :, :] + lbl = self.labels[int(number), :, :] + elif direction == "x": + im = self.seismic[:, int(number), :] + lbl = self.labels[:, int(number), :] + + im, lbl = _transform_WH_to_HW(im), _transform_WH_to_HW(lbl) + + if self.augmentations is not None: + augmented_dict = self.augmentations(image=im, mask=lbl) + im, lbl = augmented_dict["image"], augmented_dict["mask"] + + if self.is_transform: + im, lbl = self.transform(im, lbl) + + return im, lbl + + def transform(self, img, lbl): + # to be in the BxCxHxW that PyTorch uses: + lbl = np.expand_dims(lbl, 0) + if len(img.shape) == 2: + img = np.expand_dims(img, 0) + return torch.from_numpy(img).float(), torch.from_numpy(lbl).long() + + +class VoxelLoader(data.Dataset): + def 
__init__(
+        self, root_path, filename, window_size=65, split="train", n_classes=2, gen_coord_list=False, len=None,
+    ):
+
+        assert split in ("train", "val")
+
+        # location of the file
+        self.root_path = root_path
+        self.split = split
+        self.n_classes = n_classes
+        self.window_size = window_size
+        self.coord_list = None
+        self.filename = filename
+        self.full_filename = path.join(root_path, filename)
+
+        # Read 3D cube
+        # NOTE: we cannot pass this data manually as serialization of data into each python process is costly,
+        # so each worker has to load the data on its own.
+        self.data, self.data_info = readSEGY(self.full_filename)
+        # the len argument, if given, overrides the dataset length (it shadows the builtin on purpose)
+        if len:
+            self.len = len
+        else:
+            self.len = self.data.size
+        self.labels = None
+
+        if gen_coord_list:
+            # generate a list of coordinates to index the entire voxel
+            # memory footprint of this isn't large yet, so no need to wrap as a generator
+            nx, ny, nz = self.data.shape
+            x_list = range(self.window_size, nx - self.window_size)
+            y_list = range(self.window_size, ny - self.window_size)
+            z_list = range(self.window_size, nz - self.window_size)
+
+            print("-- generating coord list --")
+            # TODO: is there any way to use a generator with pyTorch data loader?
+            self.coord_list = list(itertools.product(x_list, y_list, z_list))
+
+    def __len__(self):
+        return self.len
+
+    def __getitem__(self, index):
+
+        # TODO: can we specify a pixel mathematically by index?
+        pixel = self.coord_list[index]
+        x, y, z = pixel
+        # TODO: current bottleneck - can we slice out voxels any faster?
+        small_cube = self.data[
+            x - self.window_size : x + self.window_size + 1,
+            y - self.window_size : y + self.window_size + 1,
+            z - self.window_size : z + self.window_size + 1,
+        ]
+
+        return small_cube[np.newaxis, :, :, :], pixel
+
+    # TODO: do we need a transformer for voxels?
+ """ + def transform(self, img, lbl): + # to be in the BxCxHxW that PyTorch uses: + lbl = np.expand_dims(lbl, 0) + if len(img.shape) == 2: + img = np.expand_dims(img, 0) + return torch.from_numpy(img).float(), torch.from_numpy(lbl).long() + """ + + +class TrainSectionLoader(SectionLoader): + def __init__(self, data_dir, split="train", is_transform=True, augmentations=None): + super(TrainSectionLoader, self).__init__( + data_dir, split=split, is_transform=is_transform, augmentations=augmentations, + ) + + self.seismic = np.load(_train_data_for(self.data_dir)) + self.labels = np.load(_train_labels_for(self.data_dir)) + + # reading the file names for split + txt_path = path.join(self.data_dir, "splits", "section_" + split + ".txt") + file_list = tuple(open(txt_path, "r")) + file_list = [id_.rstrip() for id_ in file_list] + self.sections = file_list + + +class TrainSectionLoaderWithDepth(TrainSectionLoader): + def __init__(self, data_dir, split="train", is_transform=True, augmentations=None): + super(TrainSectionLoaderWithDepth, self).__init__( + data_dir, split=split, is_transform=is_transform, augmentations=augmentations, + ) + self.seismic = add_section_depth_channels(self.seismic) # NCWH + + def __getitem__(self, index): + + section_name = self.sections[index] + direction, number = section_name.split(sep="_") + + if direction == "i": + im = self.seismic[int(number), :, :, :] + lbl = self.labels[int(number), :, :] + elif direction == "x": + im = self.seismic[:, :, int(number), :] + lbl = self.labels[:, int(number), :] + + im = np.swapaxes(im, 0, 1) # From WCH to CWH + + im, lbl = _transform_WH_to_HW(im), _transform_WH_to_HW(lbl) + + if self.augmentations is not None: + im = _transform_CHW_to_HWC(im) + augmented_dict = self.augmentations(image=im, mask=lbl) + im, lbl = augmented_dict["image"], augmented_dict["mask"] + im = _transform_HWC_to_CHW(im) + + if self.is_transform: + im, lbl = self.transform(im, lbl) + + return im, lbl + + +class TrainVoxelWaldelandLoader(VoxelLoader): + def __init__( + self, root_path, filename, split="train", window_size=65, batch_size=None, len=None, + ): + super(TrainVoxelWaldelandLoader, self).__init__( + root_path, filename, split=split, window_size=window_size, len=len + ) + + label_fname = None + if split == "train": + label_fname = path.join(self.root_path, "inline_339.png") + elif split == "val": + label_fname = path.join(self.root_path, "inline_405.png") + else: + raise Exception("undefined split") + + self.class_imgs, self.coordinates = read_labels(label_fname, self.data_info) + + self.batch_size = batch_size if batch_size else 1 + + def __getitem__(self, index): + # print(index) + batch, labels = get_random_batch( + self.data, + self.coordinates, + self.window_size, + self.batch_size, + index, + random_flip=True, + random_stretch=0.2, + random_rot_xy=180, + random_rot_z=15, + ) + + return batch, labels + + +# TODO: write TrainVoxelLoaderWithDepth +TrainVoxelLoaderWithDepth = TrainVoxelWaldelandLoader + + +class TestSectionLoader(SectionLoader): + def __init__(self, data_dir, split="test1", is_transform=True, augmentations=None): + super(TestSectionLoader, self).__init__( + data_dir, split=split, is_transform=is_transform, augmentations=augmentations, + ) + + if "test1" in self.split: + self.seismic = np.load(_test1_data_for(self.data_dir)) + self.labels = np.load(_test1_labels_for(self.data_dir)) + elif "test2" in self.split: + self.seismic = np.load(_test2_data_for(self.data_dir)) + self.labels = np.load(_test2_labels_for(self.data_dir)) + + # We are 
in test mode. Only read the given split. The other one might not + # be available. + txt_path = path.join(self.data_dir, "splits", "section_" + split + ".txt") + file_list = tuple(open(txt_path, "r")) + file_list = [id_.rstrip() for id_ in file_list] + self.sections = file_list + + +class TestSectionLoaderWithDepth(TestSectionLoader): + def __init__(self, data_dir, split="test1", is_transform=True, augmentations=None): + super(TestSectionLoaderWithDepth, self).__init__( + data_dir, split=split, is_transform=is_transform, augmentations=augmentations, + ) + self.seismic = add_section_depth_channels(self.seismic) # NCWH + + def __getitem__(self, index): + + section_name = self.sections[index] + direction, number = section_name.split(sep="_") + + if direction == "i": + im = self.seismic[int(number), :, :, :] + lbl = self.labels[int(number), :, :] + elif direction == "x": + im = self.seismic[:, :, int(number), :] + lbl = self.labels[:, int(number), :] + + im = np.swapaxes(im, 0, 1) # From WCH to CWH + + im, lbl = _transform_WH_to_HW(im), _transform_WH_to_HW(lbl) + + if self.augmentations is not None: + im = _transform_CHW_to_HWC(im) + augmented_dict = self.augmentations(image=im, mask=lbl) + im, lbl = augmented_dict["image"], augmented_dict["mask"] + im = _transform_HWC_to_CHW(im) + + if self.is_transform: + im, lbl = self.transform(im, lbl) + + return im, lbl + + +class TestVoxelWaldelandLoader(VoxelLoader): + def __init__(self, data_dir, split="test"): + super(TestVoxelWaldelandLoader, self).__init__(data_dir, split=split) + + +# TODO: write TestVoxelLoaderWithDepth +TestVoxelLoaderWithDepth = TestVoxelWaldelandLoader + + +def _transform_WH_to_HW(numpy_array): + assert len(numpy_array.shape) >= 2, "This method needs at least 2D arrays" + return np.swapaxes(numpy_array, -2, -1) + + +class PatchLoader(data.Dataset): + """ + Data loader for the patch-based deconvnet + """ + + def __init__(self, data_dir, stride=30, patch_size=99, is_transform=True, augmentations=None): + self.data_dir = data_dir + self.is_transform = is_transform + self.augmentations = augmentations + self.n_classes = 6 + self.patches = list() + self.patch_size = patch_size + self.stride = stride + + def pad_volume(self, volume): + """ + Only used for train/val!! Not test. 
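+        Pads all three axes of the volume by `patch_size` voxels on each side
+        with the constant value 255, so e.g. for patch_size=99 a hypothetical
+        (401, 701, 255) training volume would become (599, 899, 453).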
+ """ + return np.pad(volume, pad_width=self.patch_size, mode="constant", constant_values=255) + + def __len__(self): + return len(self.patches) + + def __getitem__(self, index): + + patch_name = self.patches[index] + direction, idx, xdx, ddx = patch_name.split(sep="_") + + # Shift offsets the padding that is added in training + # shift = self.patch_size if "test" not in self.split else 0 + # TODO: Remember we are cancelling the shift since we no longer pad + shift = 0 + idx, xdx, ddx = int(idx) + shift, int(xdx) + shift, int(ddx) + shift + + if direction == "i": + im = self.seismic[idx, xdx : xdx + self.patch_size, ddx : ddx + self.patch_size] + lbl = self.labels[idx, xdx : xdx + self.patch_size, ddx : ddx + self.patch_size] + elif direction == "x": + im = self.seismic[idx : idx + self.patch_size, xdx, ddx : ddx + self.patch_size] + lbl = self.labels[idx : idx + self.patch_size, xdx, ddx : ddx + self.patch_size] + + im, lbl = _transform_WH_to_HW(im), _transform_WH_to_HW(lbl) + + if self.augmentations is not None: + augmented_dict = self.augmentations(image=im, mask=lbl) + im, lbl = augmented_dict["image"], augmented_dict["mask"] + + if self.is_transform: + im, lbl = self.transform(im, lbl) + return im, lbl + + def transform(self, img, lbl): + # to be in the BxCxHxW that PyTorch uses: + lbl = np.expand_dims(lbl, 0) + if len(img.shape) == 2: + img = np.expand_dims(img, 0) + return torch.from_numpy(img).float(), torch.from_numpy(lbl).long() + + +class TestPatchLoader(PatchLoader): + def __init__(self, data_dir, stride=30, patch_size=99, is_transform=True, augmentations=None): + super(TestPatchLoader, self).__init__( + data_dir, stride=stride, patch_size=patch_size, is_transform=is_transform, augmentations=augmentations, + ) + ## Warning: this is not used or tested + raise NotImplementedError("This class is not correctly implemented.") + self.seismic = np.load(_train_data_for(self.data_dir)) + self.labels = np.load(_train_labels_for(self.data_dir)) + + # We are in test mode. Only read the given split. The other one might not + # be available. + self.split = "test1" # TODO: Fix this can also be test2 + txt_path = path.join(self.data_dir, "splits", "patch_" + self.split + ".txt") + patch_list = tuple(open(txt_path, "r")) + patch_list = [id_.rstrip() for id_ in patch_list] + self.patches = patch_list + + +class TrainPatchLoader(PatchLoader): + def __init__( + self, data_dir, split="train", stride=30, patch_size=99, is_transform=True, augmentations=None, + ): + super(TrainPatchLoader, self).__init__( + data_dir, stride=stride, patch_size=patch_size, is_transform=is_transform, augmentations=augmentations, + ) + # self.seismic = self.pad_volume(np.load(seismic_path)) + # self.labels = self.pad_volume(np.load(labels_path)) + warnings.warn("This no longer pads the volume") + self.seismic = np.load(_train_data_for(self.data_dir)) + self.labels = np.load(_train_labels_for(self.data_dir)) + # We are in train/val mode. Most likely the test splits are not saved yet, + # so don't attempt to load them. 
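+        # Patch ids are produced by scripts/prepare_dutchf3.py, one per line,
+        # e.g. "i_250_30_60": the direction ("i" inline / "x" crossline),
+        # the section number, and the two patch offsets used in __getitem__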
+ self.split = split + # reading the file names for split + txt_path = path.join(self.data_dir, "splits", "patch_" + split + ".txt") + patch_list = tuple(open(txt_path, "r")) + patch_list = [id_.rstrip() for id_ in patch_list] + self.patches = patch_list + + +class TrainPatchLoaderWithDepth(TrainPatchLoader): + def __init__( + self, data_dir, split="train", stride=30, patch_size=99, is_transform=True, augmentations=None, + ): + super(TrainPatchLoaderWithDepth, self).__init__( + data_dir, stride=stride, patch_size=patch_size, is_transform=is_transform, augmentations=augmentations, + ) + + def __getitem__(self, index): + + patch_name = self.patches[index] + direction, idx, xdx, ddx = patch_name.split(sep="_") + + # Shift offsets the padding that is added in training + # shift = self.patch_size if "test" not in self.split else 0 + # TODO: Remember we are cancelling the shift since we no longer pad + shift = 0 + idx, xdx, ddx = int(idx) + shift, int(xdx) + shift, int(ddx) + shift + + if direction == "i": + im = self.seismic[idx, xdx : xdx + self.patch_size, ddx : ddx + self.patch_size] + lbl = self.labels[idx, xdx : xdx + self.patch_size, ddx : ddx + self.patch_size] + elif direction == "x": + im = self.seismic[idx : idx + self.patch_size, xdx, ddx : ddx + self.patch_size] + lbl = self.labels[idx : idx + self.patch_size, xdx, ddx : ddx + self.patch_size] + + im, lbl = _transform_WH_to_HW(im), _transform_WH_to_HW(lbl) + + # TODO: Add check for rotation augmentations and raise warning if found + if self.augmentations is not None: + augmented_dict = self.augmentations(image=im, mask=lbl) + im, lbl = augmented_dict["image"], augmented_dict["mask"] + + im = add_patch_depth_channels(im) + + if self.is_transform: + im, lbl = self.transform(im, lbl) + return im, lbl + + +def _transform_CHW_to_HWC(numpy_array): + return np.moveaxis(numpy_array, 0, -1) + + +def _transform_HWC_to_CHW(numpy_array): + return np.moveaxis(numpy_array, -1, 0) + + +class TrainPatchLoaderWithSectionDepth(TrainPatchLoader): + def __init__( + self, data_dir, split="train", stride=30, patch_size=99, is_transform=True, augmentations=None, + ): + super(TrainPatchLoaderWithSectionDepth, self).__init__( + data_dir, + split=split, + stride=stride, + patch_size=patch_size, + is_transform=is_transform, + augmentations=augmentations, + ) + self.seismic = add_section_depth_channels(self.seismic) + + def __getitem__(self, index): + + patch_name = self.patches[index] + direction, idx, xdx, ddx = patch_name.split(sep="_") + + # Shift offsets the padding that is added in training + # shift = self.patch_size if "test" not in self.split else 0 + # TODO: Remember we are cancelling the shift since we no longer pad + shift = 0 + idx, xdx, ddx = int(idx) + shift, int(xdx) + shift, int(ddx) + shift + if direction == "i": + im = self.seismic[idx, :, xdx : xdx + self.patch_size, ddx : ddx + self.patch_size] + lbl = self.labels[idx, xdx : xdx + self.patch_size, ddx : ddx + self.patch_size] + elif direction == "x": + im = self.seismic[idx : idx + self.patch_size, :, xdx, ddx : ddx + self.patch_size] + lbl = self.labels[idx : idx + self.patch_size, xdx, ddx : ddx + self.patch_size] + im = np.swapaxes(im, 0, 1) # From WCH to CWH + + im, lbl = _transform_WH_to_HW(im), _transform_WH_to_HW(lbl) + + if self.augmentations is not None: + im = _transform_CHW_to_HWC(im) + augmented_dict = self.augmentations(image=im, mask=lbl) + im, lbl = augmented_dict["image"], augmented_dict["mask"] + im = _transform_HWC_to_CHW(im) + + if self.is_transform: + im, lbl = 
self.transform(im, lbl)
+        return im, lbl
+
+
+_TRAIN_PATCH_LOADERS = {
+    "section": TrainPatchLoaderWithSectionDepth,
+    "patch": TrainPatchLoaderWithDepth,
+}
+
+_TRAIN_SECTION_LOADERS = {"section": TrainSectionLoaderWithDepth}
+
+_TRAIN_VOXEL_LOADERS = {"voxel": TrainVoxelLoaderWithDepth}
+
+
+def get_patch_loader(cfg):
+    assert cfg.TRAIN.DEPTH in [
+        "section",
+        "patch",
+        "none",
+    ], f"Depth {cfg.TRAIN.DEPTH} not supported for patch data. \
+        Valid values: section, patch, none."
+    return _TRAIN_PATCH_LOADERS.get(cfg.TRAIN.DEPTH, TrainPatchLoader)
+
+
+def get_section_loader(cfg):
+    assert cfg.TRAIN.DEPTH in [
+        "section",
+        "none",
+    ], f"Depth {cfg.TRAIN.DEPTH} not supported for section data. \
+        Valid values: section, none."
+    return _TRAIN_SECTION_LOADERS.get(cfg.TRAIN.DEPTH, TrainSectionLoader)
+
+
+def get_voxel_loader(cfg):
+    assert cfg.TRAIN.DEPTH in [
+        "voxel",
+        "none",
+    ], f"Depth {cfg.TRAIN.DEPTH} not supported for voxel data. \
+        Valid values: voxel, none."
+    return _TRAIN_VOXEL_LOADERS.get(cfg.TRAIN.DEPTH, TrainVoxelWaldelandLoader)
+
+
+_TEST_LOADERS = {"section": TestSectionLoaderWithDepth}
+
+
+def get_test_loader(cfg):
+    return _TEST_LOADERS.get(cfg.TRAIN.DEPTH, TestSectionLoader)
+
+
+def add_patch_depth_channels(image_array):
+    """Add 2 extra channels to a 1 channel numpy array
+    One channel is a linear sequence from 0 to 1 starting from the top of the image to the bottom
+    The second channel is the product of the input channel and the 'depth' channel
+
+    Args:
+        image_array (np.array): 2D numpy array (HW)
+
+    Returns:
+        np.array: 3D numpy array (CHW)
+    """
+    h, w = image_array.shape
+    image = np.zeros([3, h, w])
+    image[0] = image_array
+    for row, const in enumerate(np.linspace(0, 1, h)):
+        image[1, row, :] = const
+    image[2] = image[0] * image[1]
+    return image
+
+
+def add_section_depth_channels(sections_numpy):
+    """Add 2 extra channels to a 1 channel section
+    One channel is a linear sequence from 0 to 1 starting from the top of the section to the bottom
+    The second channel is the product of the input channel and the 'depth' channel
+
+    Args:
+        sections_numpy (np.array): 3D numpy array of sections (NWH)
+
+    Returns:
+        np.array: 4D numpy array (NCWH)
+    """
+    n, w, h = sections_numpy.shape
+    image = np.zeros([3, n, w, h])
+    image[0] = sections_numpy
+    for row, const in enumerate(np.linspace(0, 1, h)):
+        image[1, :, :, row] = const
+    image[2] = image[0] * image[1]
+    return np.swapaxes(image, 0, 1)
+
+
+def get_seismic_labels():
+    return np.asarray(
+        [[69, 117, 180], [145, 191, 219], [224, 243, 248], [254, 224, 144], [252, 141, 89], [215, 48, 39]]
+    )
+
+
+@curry
+def decode_segmap(label_mask, n_classes=6, label_colours=get_seismic_labels()):
+    """Decode segmentation class labels into a colour image
+    Args:
+        label_mask (np.ndarray): an (N,H,W) array of integer values denoting
+            the class label at each spatial location.
+    Returns:
+        (np.ndarray): the resulting decoded color image (NCHW).
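+
+    Example:
+        >>> masks = np.random.randint(0, 6, size=(4, 256, 256))
+        >>> decode_segmap(masks).shape
+        (4, 3, 256, 256)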
+ """ + r = label_mask.copy() + g = label_mask.copy() + b = label_mask.copy() + for ll in range(0, n_classes): + r[label_mask == ll] = label_colours[ll, 0] + g[label_mask == ll] = label_colours[ll, 1] + b[label_mask == ll] = label_colours[ll, 2] + rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], label_mask.shape[2], 3)) + rgb[:, :, :, 0] = r / 255.0 + rgb[:, :, :, 1] = g / 255.0 + rgb[:, :, :, 2] = b / 255.0 + return np.transpose(rgb, (0, 3, 1, 2)) diff --git a/interpretation/deepseismic_interpretation/dutchf3/utils/__init__.py b/interpretation/deepseismic_interpretation/dutchf3/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/interpretation/deepseismic_interpretation/dutchf3/utils/batch.py b/interpretation/deepseismic_interpretation/dutchf3/utils/batch.py new file mode 100644 index 00000000..8ebc6790 --- /dev/null +++ b/interpretation/deepseismic_interpretation/dutchf3/utils/batch.py @@ -0,0 +1,504 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import numpy as np +import scipy + + +def get_coordinates_for_slice(slice_type, slice_no, data_info): + """ + + Get coordinates for slice in the full cube + + Args: + slice_type: type of slice, e.g. inline, crossline, etc + slice_no: slice number + data_info: data dictionary array + + Returns: + index coordinates of the voxel + + """ + ds = data_info["shape"] + + # Coordinates for cube + x0, x1, x2 = np.meshgrid( + np.linspace(0, ds[0] - 1, ds[0]), + np.linspace(0, ds[1] - 1, ds[1]), + np.linspace(0, ds[2] - 1, ds[2]), + indexing="ij", + ) + if slice_type == "inline": + start = data_info["inline_start"] + slice_no = slice_no - start + + x0 = x0[:, slice_no, :] + x1 = x1[:, slice_no, :] + x2 = x2[:, slice_no, :] + elif slice_type == "crossline": + start = data_info["crossline_start"] + slice_no = slice_no - start + x0 = x0[:, :, slice_no] + x1 = x1[:, :, slice_no] + x2 = x2[:, :, slice_no] + + elif slice_type == "timeslice": + start = data_info["timeslice_start"] + slice_no = slice_no - start + x0 = x0[slice_no, :, :] + x1 = x1[slice_no, :, :] + x2 = x2[slice_no, :, :] + + # Collect indexes + x0 = np.expand_dims(x0.ravel(), 0) + x1 = np.expand_dims(x1.ravel(), 0) + x2 = np.expand_dims(x2.ravel(), 0) + coords = np.concatenate((x0, x1, x2), axis=0) + + return coords + + +def parse_labels_in_image(img): + """ + Convert RGB image to class img. 
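+    Pixels are matched against the colour table below within a small per-channel
+    tolerance; pixels that match no entry keep the label -1 (no class).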
+
+    Args:
+        img: 3-channel image array
+
+    Returns:
+        monotonically increasing class labels
+    """
+
+    # Add colors to this table to make it possible to have more classes
+    class_color_coding = [
+        [0, 0, 255],  # blue
+        [0, 255, 0],  # green
+        [0, 255, 255],  # cyan
+        [255, 0, 0],  # red
+        [255, 0, 255],  # magenta
+        [255, 255, 0],  # yellow
+    ]
+
+    label_img = np.int16(img[:, :, 0]) * 0 - 1  # -1 = no class
+
+    # decompose color channels
+    r = img[:, :, 0]
+    g = img[:, :, 1]
+    b = img[:, :, 2]
+
+    # Alpha channel - scale colours by opacity so that fully transparent
+    # pixels match no entry in the colour table
+    if img.shape[2] == 4:
+        a = img[:, :, 3] / 255
+        r = r * a
+        g = g * a
+        b = b * a
+
+    tolerance = 1
+    # Go through classes and find pixels with this class
+    cls = 0
+    for color in class_color_coding:
+        # Find pixels with these labels
+        inds = (
+            (np.abs(r - color[0]) < tolerance) & (np.abs(g - color[1]) < tolerance) & (np.abs(b - color[2]) < tolerance)
+        )
+        label_img[inds] = cls
+        cls += 1
+
+    return label_img
+
+
+def interpolate_to_fit_data(img, slice_type, slice_no, data_info):
+    """
+    Function to resize image if needed
+
+    Args:
+        img: image array
+        slice_type: inline, crossline or timeslice slice type
+        slice_no: slice number
+        data_info: data info dictionary extracted from the SEGY file
+
+    Returns:
+        resized image array
+
+    """
+
+    # Get wanted output size
+    if slice_type == "inline":
+        n0 = data_info["shape"][0]
+        n1 = data_info["shape"][2]
+    elif slice_type == "crossline":
+        n0 = data_info["shape"][0]
+        n1 = data_info["shape"][1]
+    elif slice_type == "timeslice":
+        n0 = data_info["shape"][1]
+        n1 = data_info["shape"][2]
+    # NOTE: scipy.misc.imresize is deprecated and was removed in SciPy 1.3;
+    # this call requires an older SciPy with Pillow installed
+    return scipy.misc.imresize(img, (n0, n1), interp="nearest")
+
+
+def get_grid(im_size):
+    """
+    Returns z,x,y coordinates centered around (0,0,0)
+
+    Args:
+        im_size: size of window
+
+    Returns:
+        numpy array of shape 3 x (im_size[0] * im_size[1] * im_size[2])
+    """
+    win0 = np.linspace(-im_size[0] // 2, im_size[0] // 2, im_size[0])
+    win1 = np.linspace(-im_size[1] // 2, im_size[1] // 2, im_size[1])
+    win2 = np.linspace(-im_size[2] // 2, im_size[2] // 2, im_size[2])
+
+    x0, x1, x2 = np.meshgrid(win0, win1, win2, indexing="ij")
+
+    ex0 = np.expand_dims(x0.ravel(), 0)
+    ex1 = np.expand_dims(x1.ravel(), 0)
+    ex2 = np.expand_dims(x2.ravel(), 0)
+
+    grid = np.concatenate((ex0, ex1, ex2), axis=0)
+
+    return grid
+
+
+def augment_flip(grid):
+    """
+    Random flip of non-depth axes.
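+    Each of the two non-depth axes (rows 1 and 2 of the grid) is flipped
+    independently with probability 0.5 by negating its coordinates.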
+
+    Args:
+        grid: 3D coordinates of the voxel
+
+    Returns:
+        flipped grid coordinates
+    """
+
+    # Flip x axis
+    if rand_bool():
+        grid[1, :] = -grid[1, :]
+
+    # Flip y axis
+    if rand_bool():
+        grid[2, :] = -grid[2, :]
+
+    return grid
+
+
+def augment_stretch(grid, stretch_factor):
+    """
+    Random stretch/scale
+
+    Args:
+        grid: 3D coordinate grid of the voxel
+        stretch_factor: maximum absolute stretch fraction; the applied stretch
+            is drawn uniformly from [-stretch_factor, stretch_factor]
+        TODO: change this to just call the function and not do -1,1 in rand_float
+
+    Returns:
+        stretched grid coordinates
+    """
+    stretch = rand_float(-stretch_factor, stretch_factor)
+    grid *= 1 + stretch
+    return grid
+
+
+def augment_rot_xy(grid, random_rot_xy):
+    """
+    Random rotation
+
+    Args:
+        grid: coordinate grid list of 3D points
+        random_rot_xy: maximum rotation angle in degrees; the angle is drawn
+            uniformly from [-random_rot_xy, random_rot_xy]
+        TODO: change this to just call the function and not do -1,1 in rand_float
+
+    Returns:
+        randomly rotated grid
+    """
+    theta = np.deg2rad(rand_float(-random_rot_xy, random_rot_xy))
+    x = grid[2, :] * np.cos(theta) - grid[1, :] * np.sin(theta)
+    y = grid[2, :] * np.sin(theta) + grid[1, :] * np.cos(theta)
+    grid[1, :] = x
+    grid[2, :] = y
+    return grid
+
+
+def augment_rot_z(grid, random_rot_z):
+    """
+    Random tilt around z-axis (dim-2)
+
+    Args:
+        grid: coordinate grid list of 3D points
+        random_rot_z: maximum tilt angle in degrees; the angle is drawn
+            uniformly from [-random_rot_z, random_rot_z]
+        TODO: change this to just call the function and not do -1,1 in rand_float
+
+    Returns:
+        randomly tilted coordinate grid
+    """
+    theta = np.deg2rad(rand_float(-random_rot_z, random_rot_z))
+    z = grid[0, :] * np.cos(theta) - grid[1, :] * np.sin(theta)
+    x = grid[0, :] * np.sin(theta) + grid[1, :] * np.cos(theta)
+    grid[0, :] = z
+    grid[1, :] = x
+    return grid
+
+
+def trilinear_interpolation(input_array, indices):
+    """
+    Trilinear interpolation
+    code taken from
+    http://stackoverflow.com/questions/6427276/3d-interpolation-of-numpy-arrays-without-scipy
+
+    Args:
+        input_array: 3D data array
+        indices: 3D grid coordinates
+
+    Returns:
+        interpolated input array
+    """
+
+    x_indices, y_indices, z_indices = indices[0:3]
+
+    n0, n1, n2 = input_array.shape
+
+    x0 = x_indices.astype(np.integer)
+    y0 = y_indices.astype(np.integer)
+    z0 = z_indices.astype(np.integer)
+    x1 = x0 + 1
+    y1 = y0 + 1
+    z1 = z0 + 1
+
+    # put all samples outside datacube to 0
+    inds_out_of_range = (
+        (x0 < 0)
+        | (x1 < 0)
+        | (y0 < 0)
+        | (y1 < 0)
+        | (z0 < 0)
+        | (z1 < 0)
+        | (x0 >= n0)
+        | (x1 >= n0)
+        | (y0 >= n1)
+        | (y1 >= n1)
+        | (z0 >= n2)
+        | (z1 >= n2)
+    )
+
+    x0[inds_out_of_range] = 0
+    y0[inds_out_of_range] = 0
+    z0[inds_out_of_range] = 0
+    x1[inds_out_of_range] = 0
+    y1[inds_out_of_range] = 0
+    z1[inds_out_of_range] = 0
+
+    x = x_indices - x0
+    y = y_indices - y0
+    z = z_indices - z0
+    output = (
+        input_array[x0, y0, z0] * (1 - x) * (1 - y) * (1 - z)
+        + input_array[x1, y0, z0] * x * (1 - y) * (1 - z)
+        + input_array[x0, y1, z0] * (1 - x) * y * (1 - z)
+        + input_array[x0, y0, z1] * (1 - x) * (1 - y) * z
+        + input_array[x1, y0, z1] * x * (1 - y) * z
+        + input_array[x0, y1, z1] * (1 - x) * y * z
+        + input_array[x1, y1, z0] * x * y * (1 - z)
+        + input_array[x1, y1, z1] * x * y * z
+    )
+
+    output[inds_out_of_range] = 0
+    return output
+
+
+def rand_float(low, high):
+    """
+    Generate random floating point number between two limits
+
+    Args:
+        low: low limit
+        high: high limit
+
+    Returns:
+        single random floating point number
+    """
+    return (high - low) * np.random.random_sample() + low
+
+
+def rand_int(low, high):
+    """
+    Generate random
integer between two limits
+
+    Args:
+        low: low limit
+        high: high limit
+
+    Returns:
+        random integer between two limits
+    """
+    return np.random.randint(low, high)
+
+
+def rand_bool():
+    """
+    Generate random boolean.
+
+    Returns:
+        Random boolean
+    """
+    return bool(np.random.randint(0, 2))
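+
+
+# Example: the helpers above compose into the random voxel-sampling pipeline
+# used by the Waldeland-style loaders. A minimal sketch with illustrative
+# values (the cube, window size and centre voxel are not taken from this file):
+#
+#     cube = np.random.rand(200, 200, 200)          # stand-in seismic volume
+#     grid = get_grid((65, 65, 65))                 # 3 x 65**3 coords around (0,0,0)
+#     grid = augment_flip(grid)                     # random flips of non-depth axes
+#     grid = augment_stretch(grid, 0.2)             # scale by up to +/- 20%
+#     grid = augment_rot_xy(grid, 180)              # rotate up to +/- 180 degrees
+#     grid = augment_rot_z(grid, 15)                # tilt up to +/- 15 degrees
+#     grid += np.array([[100], [100], [100]])       # centre the window on a voxel
+#     voxels = trilinear_interpolation(cube, grid)  # flat vector of 65**3 samples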
diff --git a/interpretation/deepseismic_interpretation/models/__init__.py b/interpretation/deepseismic_interpretation/models/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/interpretation/deepseismic_interpretation/models/texture_net.py b/interpretation/deepseismic_interpretation/models/texture_net.py
new file mode 100644
index 00000000..da5371d5
--- /dev/null
+++ b/interpretation/deepseismic_interpretation/models/texture_net.py
@@ -0,0 +1,158 @@
+# Copyright (c) Microsoft. All rights reserved.
+# Licensed under the MIT license.
+
+# code modified from https://github.com/waldeland/CNN-for-ASI
+
+import torch
+from torch import nn
+
+# TODO: set channels from yaml config file
+class TextureNet(nn.Module):
+    def __init__(self, n_classes=2):
+        super(TextureNet, self).__init__()
+
+        # Network definition
+        # Parameters: in_channels, out_channels, filter_size, stride (downsampling factor)
+        self.net = nn.Sequential(
+            nn.Conv3d(1, 50, 5, 4, padding=2),
+            nn.BatchNorm3d(50),
+            # nn.Dropout3d()  # Dropout can be added like this ...
+            nn.ReLU(),
+            nn.Conv3d(50, 50, 3, 2, padding=1, bias=False),
+            nn.BatchNorm3d(50),
+            nn.ReLU(),
+            nn.Conv3d(50, 50, 3, 2, padding=1, bias=False),
+            nn.BatchNorm3d(50),
+            nn.ReLU(),
+            nn.Conv3d(50, 50, 3, 2, padding=1, bias=False),
+            nn.BatchNorm3d(50),
+            nn.ReLU(),
+            nn.Conv3d(50, 50, 3, 3, padding=1, bias=False),
+            nn.BatchNorm3d(50),
+            nn.ReLU(),
+            nn.Conv3d(
+                50, n_classes, 1, 1
+            ),  # This is the equivalent of a fully connected layer since input has width/height/depth = 1
+            nn.ReLU(),
+        )
+        # The filter weights are initialized randomly by default
+
+    def forward(self, x):
+        """
+        Compute network output
+
+        Args:
+            x: network input - torch tensor
+
+        Returns:
+            output from the neural network
+
+        """
+        return self.net(x)
+
+    def classify(self, x):
+        """
+        Classification wrapper
+
+        Args:
+            x: input tensor for classification
+
+        Returns:
+            classification result
+
+        """
+        x = self.net(x)
+        _, class_no = torch.max(x, 1, keepdim=True)
+        return class_no
+
+    # Functions to get output from intermediate feature layers
+    def f1(self, x):
+        """
+        Wrapper to obtain a particular network layer
+
+        Args:
+            x: input tensor for classification
+
+        Returns:
+            requested layer
+
+        """
+        return self.getFeatures(x, 0)
+
+    def f2(self, x):
+        """
+        Wrapper to obtain a particular network layer
+
+        Args:
+            x: input tensor for classification
+
+        Returns:
+            requested layer
+
+        """
+        return self.getFeatures(x, 1)
+
+    def f3(self, x):
+        """
+        Wrapper to obtain a particular network layer
+
+        Args:
+            x: input tensor for classification
+
+        Returns:
+            requested layer
+
+        """
+        return self.getFeatures(x, 2)
+
+    def f4(self, x):
+        """
+        Wrapper to obtain a particular network layer
+
+        Args:
+            x: input tensor for classification
+
+        Returns:
+            requested layer
+
+        """
+        return self.getFeatures(x, 3)
+
+    def f5(self, x):
+        """
+        Wrapper to obtain a particular network layer
+
+        Args:
+            x: input tensor for classification
+
+        Returns:
+            requested layer
+
+        """
+        return self.getFeatures(x, 4)
+
+    def getFeatures(self, x, layer_no):
+        """
+        Main call method to call the wrapped layers
+
+        Args:
+            x: input tensor for
classification + layer_no: number of hidden layer we want to extract + + Returns: + requested layer + + """ + layer_indexes = [0, 3, 6, 9, 12] + + # Make new network that has the layers up to the requested output + tmp_net = nn.Sequential() + layers = list(self.net.children())[0 : layer_indexes[layer_no] + 1] + for i in range(len(layers)): + tmp_net.add_module(str(i), layers[i]) + return tmp_net(x) + + +def get_seg_model(cfg, **kwargs): + model = TextureNet(n_classes=cfg.DATASET.NUM_CLASSES) + return model diff --git a/interpretation/deepseismic_interpretation/penobscot/__init__.py b/interpretation/deepseismic_interpretation/penobscot/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/interpretation/deepseismic_interpretation/penobscot/data.py b/interpretation/deepseismic_interpretation/penobscot/data.py new file mode 100644 index 00000000..7afebf57 --- /dev/null +++ b/interpretation/deepseismic_interpretation/penobscot/data.py @@ -0,0 +1,510 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import glob +import itertools +import os +import random +import warnings +from builtins import FileNotFoundError +from collections import defaultdict +from itertools import filterfalse + +import numpy as np +import torch +from PIL import Image +from toolz import compose, take, curry +from toolz import pipe +from torchvision.datasets.utils import iterable_to_str, verify_str_arg +from torchvision.datasets.vision import VisionDataset + +_open_to_array = compose(np.array, Image.open) + + +class DataNotSplitException(Exception): + pass + + +@curry +def _pad_right_and_bottom(pad_size, numpy_array, pad_value=255): + assert ( + len(numpy_array.shape) == 2 + ), f"_pad_right_and_bottom only accepts 2D arrays. Input is {len(numpy_array.shape)}D" + return np.pad(numpy_array, pad_width=[(0, pad_size), (0, pad_size)], constant_values=pad_value) + + +def _get_classes_and_counts(mask_list): + class_counts_dict = defaultdict(int) + for mask in mask_list: + for class_label, class_count in zip(*np.unique(mask, return_counts=True)): + class_counts_dict[class_label] += class_count + return list(class_counts_dict.keys()), list(class_counts_dict.values()) + + +def _combine(mask_array): + """Combine classes 2 and 3. 
Reduce all classes above 3 by one + """ + mask_array[np.logical_or(mask_array == 2, mask_array == 3)] = 2 + for i in filter(lambda x: x > 3, np.unique(mask_array)): + mask_array[mask_array == i] = i - 1 + return mask_array + + +def _combine_classes(mask_array_list): + """Combine classes + + Segmentation implementations using this dataset seem to combine + classes 2 and 3 so we are doing the same here and then relabeling the rest + + Args: + mask_array_list (list): list of mask (numpy.Array) + """ + return [_combine(mask_array.copy()) for mask_array in mask_array_list] + + +def _replicate_channels(image_array, n_channels): + new_image_array = np.zeros((n_channels, image_array.shape[0], image_array.shape[1])) + for i in range(n_channels): + new_image_array[i] = image_array + return new_image_array + + +def _extract_filename(filepath): + return os.path.splitext(os.path.split(filepath)[-1].strip())[0] # extract filename without extension + + +def _generate_images_and_masks(images_iter, mask_dir): + for image_file in images_iter: + file_part = _extract_filename(image_file) + mask_file = os.path.join(mask_dir, file_part + "_mask.png") + if os.path.exists(mask_file): + yield image_file, mask_file + else: + raise FileNotFoundError(f"Could not find mask {mask_file} corresponding to {image_file}") + + +def _number_patches_in(height_or_width, patch_size, stride, complete_patches_only=True): + strides_in_hw = (height_or_width - patch_size) / stride + if complete_patches_only: + return int(np.floor(strides_in_hw)) + else: + return int(np.ceil(strides_in_hw)) + + +def _is_2D(numpy_array): + return len(numpy_array.shape) == 2 + + +def _is_3D(numpy_array): + return len(numpy_array.shape) == 3 + + +@curry +def _extract_patches(patch_size, stride, complete_patches_only, img_array, mask_array): + height, width = img_array.shape[-2], img_array.shape[-1] + num_h_patches = _number_patches_in(height, patch_size, stride, complete_patches_only=complete_patches_only) + num_w_patches = _number_patches_in(width, patch_size, stride, complete_patches_only=complete_patches_only) + height_iter = range(0, stride * (num_h_patches + 1), stride) + width_iter = range(0, stride * (num_w_patches + 1), stride) + patch_locations = list(itertools.product(height_iter, width_iter)) + + image_patch_generator = _generate_patches_for(img_array, patch_locations, patch_size) + mask_patch_generator = _generate_patches_for(mask_array, patch_locations, patch_size) + return image_patch_generator, mask_patch_generator, patch_locations + + +def _generate_patches_for(numpy_array, patch_locations, patch_size): + if _is_2D(numpy_array): + generate = _generate_patches_from_2D + elif _is_3D(numpy_array): + generate = _generate_patches_from_3D + else: + raise ValueError("Array is not 2D or 3D") + return generate(numpy_array, patch_locations, patch_size) + + +def _generate_patches_from_2D(numpy_array, patch_locations, patch_size): + return (numpy_array[h : h + patch_size, w : w + patch_size].copy() for h, w in patch_locations) + + +def _generate_patches_from_3D(numpy_array, patch_locations, patch_size): + return (numpy_array[:, h : h + patch_size, w : w + patch_size].copy() for h, w in patch_locations) + + +@curry +def _filter_files(exclude_files, images_iter): + if exclude_files is not None: + images_iter = filterfalse(lambda x: x in exclude_files, images_iter) + + return images_iter + + +@curry +def _limit_inlines(max_inlines, images_iter): + if max_inlines is not None: + images_list = list(images_iter) + if max_inlines > len(images_list): + 
warn_msg = (
+                f"The number of max inlines {max_inlines} is greater "
+                f"than the number of inlines found {len(images_list)}. "
+                f"Setting max inlines to {len(images_list)}"
+            )
+            warnings.warn(warn_msg)
+            max_inlines = len(images_list)
+            images_iter = images_list
+        else:
+            # random.shuffle shuffles in place and returns None
+            random.shuffle(images_list)
+            images_iter = take(max_inlines, images_list)
+    return images_iter, max_inlines
+
+
+_STATS_FUNCS = {"mean": np.mean, "std": np.std, "max": np.max}
+
+
+def _transform_CHW_to_HWC(numpy_array):
+    return np.moveaxis(numpy_array, 0, -1)
+
+
+def _transform_HWC_to_CHW(numpy_array):
+    return np.moveaxis(numpy_array, -1, 0)
+
+
+def _rescale(numpy_array):
+    """Rescale the numpy array by 10000. The maximum achievable value is 32767,
+    so this brings the values roughly into the range [-3.3, 3.3]
+    """
+    return numpy_array / 10000
+
+
+class PenobscotInlinePatchDataset(VisionDataset):
+    """Dataset that returns patches from Penobscot dataset
+
+    Notes:
+        Loads inlines only and splits into patches
+    """
+
+    def __init__(
+        self,
+        root,
+        patch_size,
+        stride,
+        split="train",
+        transforms=None,
+        exclude_files=None,
+        max_inlines=None,
+        n_channels=1,
+        complete_patches_only=True,
+    ):
+        """Initialise Penobscot Dataset
+
+        Args:
+            root (str): root directory to load data from
+            patch_size (int): the size of the patch in pixels
+            stride (int): the stride applied when extracting patches
+            split (str, optional): what split to load, (train, val, test). Defaults to `train`
+            transforms (albumentations.augmentations.transforms, optional): albumentation transforms to apply to patches. Defaults to None
+            exclude_files (list[str], optional): list of files to exclude. Defaults to None
+            max_inlines (int, optional): maximum number of inlines to load. Defaults to None
+            n_channels (int, optional): number of channels that the output should contain. Defaults to 1
+            complete_patches_only (bool, optional): whether to load only complete patches; if False, incomplete patches are padded to patch_size. Defaults to True
+        """
+
+        super(PenobscotInlinePatchDataset, self).__init__(root, transforms=transforms)
+        self._image_dir = os.path.join(self.root, "inlines", split)
+        self._mask_dir = os.path.join(self.root, "masks")
+        self._split = split
+        self._exclude_files = exclude_files
+        self._max_inlines = max_inlines
+        self._n_channels = n_channels
+        self._complete_patches_only = complete_patches_only
+        self._patch_size = patch_size
+        self._stride = stride
+        self._image_array = []
+        self._mask_array = []
+        self._file_ids = []
+        self._patch_locations = []
+
+        valid_modes = ("train", "test", "val")
+        msg = "Unknown value '{}' for argument split. Valid values are {{{}}}."
+        msg = msg.format(split, iterable_to_str(valid_modes))
+        verify_str_arg(split, "split", valid_modes, msg)
+
+        if not os.path.exists(self._image_dir):
+            raise DataNotSplitException(
+                f"Directory {self._image_dir} does not exist. The dataset has not been \
+                appropriately split into train, val and test."
+ ) + + # Get the number of inlines that make up dataset + images_iter, self._max_inlines = pipe( + os.path.join(self._image_dir, "*.tiff"), + glob.iglob, + _filter_files(self._exclude_files), + _limit_inlines(self._max_inlines), + ) + + # Set the patch and stride for the patch extractor + _extract_patches_from = _extract_patches(patch_size, stride, self._complete_patches_only) + + # Extract patches + for image_path, mask_path in _generate_images_and_masks(images_iter, self._mask_dir): + img_array = self._open_image(image_path) + mask_array = self._open_mask(mask_path) + self._file_ids.append(_extract_filename(image_path)) + image_generator, mask_generator, patch_locations = _extract_patches_from(img_array, mask_array) + self._patch_locations.extend(patch_locations) + + self._image_array.extend(image_generator) + + self._mask_array.extend(mask_generator) + + assert len(self._image_array) == len(self._patch_locations), "The shape is not the same" + + assert len(self._patch_locations) % len(self._file_ids) == 0, "Something is wrong with the patches" + + self._patches_per_image = int(len(self._patch_locations) / len(self._file_ids)) + + # Combine classes 2 and 3 + self._mask_array = _combine_classes(self._mask_array) + + self._classes, self._class_counts = _get_classes_and_counts(self._mask_array) + + def _open_image(self, image_path): + return pipe(image_path, _open_to_array, _rescale) + + def _open_mask(self, mask_path): + return pipe(mask_path, _open_to_array) + + def __len__(self): + return len(self._image_array) + + @property + def n_classes(self): + return len(self._classes) + + @property + def class_proportions(self): + total = np.sum(self._class_counts) + return [(i, w / total) for i, w in zip(self._classes, self._class_counts)] + + def _add_extra_channels(self, image): + if self._n_channels > 1: + image = _replicate_channels(image, self._n_channels) + return image + + def __getitem__(self, index): + image, target, file_ids, patch_locations = ( + self._image_array[index], + self._mask_array[index], + self._file_ids[index // self._patches_per_image], + self._patch_locations[index], + ) + + image = self._add_extra_channels(image) + if _is_2D(image): + image = np.expand_dims(image, 0) + + if self.transforms is not None: + image = _transform_CHW_to_HWC(image) + augmented_dict = self.transforms(image=image, mask=target) + image, target = augmented_dict["image"], augmented_dict["mask"] + image = _transform_HWC_to_CHW(image) + + target = np.expand_dims(target, 0) + + return ( + torch.from_numpy(image).float(), + torch.from_numpy(target).long(), + file_ids, + np.array(patch_locations), + ) + + @property + def statistics(self): + flat_image_array = np.concatenate([i.flatten() for i in self._image_array]) + stats = {stat: statfunc(flat_image_array) for stat, statfunc in _STATS_FUNCS.items()} + return "Mean: {mean} Std: {std} Max: {max}".format(**stats) + + def extra_repr(self): + lines = [ + "Split: {_split}", + "Image Dir: {_image_dir}", + "Mask Dir: {_mask_dir}", + "Exclude files: {_exclude_files}", + "Patch size: {_patch_size}", + "Stride: {_stride}", + "Max inlines: {_max_inlines}", + "Num channels: {_n_channels}", + f"Num classes: {self.n_classes}", + f"Class proportions: {self.class_proportions}", + "Complete patches only: {_complete_patches_only}", + f"Dataset statistics: {self.statistics}", + ] + return "\n".join(lines).format(**self.__dict__) + + +def add_depth_channels(image_array): + """Add 2 extra channels to a 1 channel numpy array + One channel is a linear sequence from 0 to 1 
starting from the top of the image to the bottom + The second channel is the product of the input channel and the 'depth' channel + + Args: + image_array (numpy.Array): 2D Numpy array + + Returns: + [np.array]: 3D numpy array + """ + h, w = image_array.shape + image = np.zeros([3, h, w]) + image[0] = image_array + for row, const in enumerate(np.linspace(0, 1, h)): + image[1, row, :] = const + image[2] = image[0] * image[1] + return image + + +class PenobscotInlinePatchSectionDepthDataset(PenobscotInlinePatchDataset): + """Dataset that returns patches from Penobscot dataset augmented with Section depth + + Notes: + Loads inlines only and splits into patches + The patches are augmented with section depth + """ + + def __init__( + self, + root, + patch_size, + stride, + split="train", + transforms=None, + exclude_files=None, + max_inlines=None, + n_channels=3, + complete_patches_only=True, + ): + """Initialise Penobscot Dataset + + Args: + root (str): root directory to load data from + patch_size (int): the size of the patch in pixels + stride (int): the stride applied when extracting patches + split (str, optional): what split to load, (train, val, test). Defaults to `train` + transforms (albumentations.augmentations.transforms, optional): albumentation transforms + to apply to patches. + Defaults to None + exclude_files (list[str], optional): list of files to exclude. Defaults to None + max_inlines (int, optional): maximum number of inlines to load. Defaults to None + n_channels (int, optional): number of channels that the output should contain. Defaults to 3 + complete_patches_only (bool, optional): whether to load incomplete patches + that are padded to patch_size. Defaults to True + """ + + assert n_channels == 3, ( + f"For the Section Depth based dataset the number of channels can only be 3." + f"Currently n_channels={n_channels}" + ) + super(PenobscotInlinePatchSectionDepthDataset, self).__init__( + root, + patch_size, + stride, + split=split, + transforms=transforms, + exclude_files=exclude_files, + max_inlines=max_inlines, + n_channels=n_channels, + complete_patches_only=complete_patches_only, + ) + + def _open_image(self, image_path): + return pipe(image_path, _open_to_array, _rescale, add_depth_channels) + + def _add_extra_channels(self, image): + return image + + +class PenobscotInlinePatchDepthDataset(PenobscotInlinePatchDataset): + """Dataset that returns patches from Penobscot dataset augmented with patch depth + + Notes: + Loads inlines only and splits into patches + The patches are augmented with patch depth + """ + + def __init__( + self, + root, + patch_size, + stride, + split="train", + transforms=None, + exclude_files=None, + max_inlines=None, + n_channels=3, + complete_patches_only=True, + ): + """Initialise Penobscot Dataset + + Args: + root (str): root directory to load data from + patch_size (int): the size of the patch in pixels + stride (int): the stride applied when extracting patches + split (str, optional): what split to load, (train, val, test). Defaults to `train` + transforms (albumentations.augmentations.transforms, optional): albumentation transforms + to apply to patches. + Defaults to None + exclude_files (list[str], optional): list of files to exclude. Defaults to None + max_inlines (int, optional): maximum number of inlines to load. Defaults to None + n_channels (int, optional): number of channels that the output should contain. Defaults to 3 + complete_patches_only (bool, optional): whether to load incomplete patches that are + padded to patch_size. 
Defaults to True + """ + assert ( + n_channels == 3 + ), f"For the Patch Depth based dataset the number of channels can only be 3. Currently n_channels={n_channels}" + super(PenobscotInlinePatchDepthDataset, self).__init__( + root, + patch_size, + stride, + split=split, + transforms=transforms, + exclude_files=exclude_files, + max_inlines=max_inlines, + n_channels=n_channels, + complete_patches_only=complete_patches_only, + ) + + def _open_image(self, image_path): + return pipe(image_path, _open_to_array, _rescale) + + def _add_extra_channels(self, image): + return add_depth_channels(image) + + +_TRAIN_PATCH_DATASETS = { + "section": PenobscotInlinePatchSectionDepthDataset, + "patch": PenobscotInlinePatchDepthDataset, +} + + +def get_patch_dataset(cfg): + """ Return the Dataset class for Penobscot + + Args: + cfg: yacs config + + Returns: + PenobscotInlinePatchDataset + """ + assert str(cfg.TRAIN.DEPTH).lower() in [ + "section", + "patch", + "none", + ], f"Depth {cfg.TRAIN.DEPTH} not supported for patch data. \ + Valid values: section, patch, none." + return _TRAIN_PATCH_DATASETS.get(cfg.TRAIN.DEPTH, PenobscotInlinePatchDataset) + + +if __name__ == "__main__": + dataset = PenobscotInlinePatchDataset("/mnt/penobscot", 100, 50, split="train") + print(len(dataset)) diff --git a/interpretation/deepseismic_interpretation/penobscot/metrics.py b/interpretation/deepseismic_interpretation/penobscot/metrics.py new file mode 100644 index 00000000..846faacc --- /dev/null +++ b/interpretation/deepseismic_interpretation/penobscot/metrics.py @@ -0,0 +1,149 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from collections import defaultdict +from ignite.metrics import Metric +import torch +import numpy as np + + +def _torch_hist(label_true, label_pred, n_class): + """Calculates the confusion matrix for the labels + + Args: + label_true ([type]): [description] + label_pred ([type]): [description] + n_class ([type]): [description] + + Returns: + [type]: [description] + """ + # TODO Add exceptions + assert len(label_true.shape) == 1, "Labels need to be 1D" + assert len(label_pred.shape) == 1, "Predictions need to be 1D" + mask = (label_true >= 0) & (label_true < n_class) + hist = torch.bincount(n_class * label_true[mask] + label_pred[mask], minlength=n_class ** 2).reshape( + n_class, n_class + ) + return hist + + +def _default_tensor(image_height, image_width, pad_value=255): + return torch.full((image_height, image_width), pad_value, dtype=torch.long) + + +# TODO: make output transform unpad and scale down mask +# scale up y_pred and remove padding +class InlineMeanIoU(Metric): + """Compute Mean IoU for Inline + + Notes: + This metric collects all the patches and recomposes the predictions and masks + into inlines. These are then used to calculate the mean IoU. + """ + + def __init__( + self, + image_height, + image_width, + patch_size, + num_classes, + padding=0, + scale=1, + pad_value=255, + output_transform=lambda x: x, + ): + """Create instance of InlineMeanIoU + + Args: + image_height (int): height of inline + image_width (int): width of inline + patch_size (int): patch size + num_classes (int): number of classes in dataset + padding (int, optional): the amount of padding to height and width, + e.g 200 padded to 256 - padding=56. Defaults to 0 + scale (int, optional): the scale factor applied to the patch, + e.g 100 scaled to 200 - scale=2. 
Defaults to 1
+            pad_value (int): the constant value used for padding. Defaults to 255
+            output_transform (callable, optional): a callable that is used to transform
+                the ignite.engine.Engine's `process_function`'s output into the form
+                expected by the metric. This can be useful if, for example, you have
+                a multi-output model and you want to compute the metric with respect to
+                one of the outputs.
+        """
+        self._image_height = image_height
+        self._image_width = image_width
+        self._patch_size = patch_size
+        self._pad_value = pad_value
+        self._num_classes = num_classes
+        self._scale = scale
+        self._padding = padding
+        super(InlineMeanIoU, self).__init__(output_transform=output_transform)
+
+    def reset(self):
+        self._pred_dict = defaultdict(
+            lambda: _default_tensor(
+                self._image_height * self._scale, self._image_width * self._scale, pad_value=self._pad_value,
+            )
+        )
+        self._mask_dict = defaultdict(
+            lambda: _default_tensor(
+                self._image_height * self._scale, self._image_width * self._scale, pad_value=self._pad_value,
+            )
+        )
+
+    def update(self, output):
+        y_pred, y, ids, patch_locations = output
+        # TODO: Make assertion exception
+        max_prediction = y_pred.max(1)[1].squeeze()
+        assert y.shape == max_prediction.shape, "Shape not the same"
+
+        for pred, mask, id, patch_loc in zip(max_prediction, y, ids, patch_locations):
+            # ! With overlapping patches this does not aggregate the results,
+            # ! it simply overwrites them
+            # If patch is padded, ignore padding
+            pad = int(self._padding // 2)
+            pred = pred[pad : pred.shape[0] - pad, pad : pred.shape[1] - pad]
+            mask = mask[pad : mask.shape[0] - pad, pad : mask.shape[1] - pad]
+
+            # Get the area of the mask that is not padded
+            # Determine the left top edge and bottom right edge
+            # Use this to calculate the rectangular area that contains predictions
+            non_padded_mask = torch.nonzero((mask - self._pad_value).abs())
+            y_start, x_start = non_padded_mask.min(0)[0]
+            y_end, x_end = non_padded_mask.max(0)[0]
+            height = (y_end + 1) - y_start
+            width = (x_end + 1) - x_start
+
+            # TODO: use self._scale rather than the hardcoded factor 2 below
+            self._pred_dict[id][
+                patch_loc[0] * 2 : patch_loc[0] * 2 + height, patch_loc[1] * 2 : patch_loc[1] * 2 + width,
+            ] = pred[y_start : y_end + 1, x_start : x_end + 1]
+
+            self._mask_dict[id][
+                patch_loc[0] * 2 : patch_loc[0] * 2 + height, patch_loc[1] * 2 : patch_loc[1] * 2 + width,
+            ] = mask[y_start : y_end + 1, x_start : x_end + 1]
+
+    def iou_per_inline(self):
+        iou_per_inline = {}
+        for id in self._pred_dict:
+            confusion_matrix = _torch_hist(
+                torch.flatten(self._mask_dict[id]),
+                torch.flatten(self._pred_dict[id]),
+                self._num_classes,
+            )
+            hist = confusion_matrix.cpu().numpy()
+            iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
+            iou_per_inline[id] = np.nanmean(iu)
+        return iou_per_inline
+
+    @property
+    def predictions(self):
+        return self._pred_dict
+
+    @property
+    def masks(self):
+        return self._mask_dict
+
+    def compute(self):
+        iou_dict = self.iou_per_inline()
+        return np.mean(list(iou_dict.values()))
diff --git a/interpretation/requirements.txt b/interpretation/requirements.txt
new file mode 100644
index 00000000..c03e7464
--- /dev/null
+++ b/interpretation/requirements.txt
@@ -0,0 +1,3 @@
+numpy>=1.17.0
+azure-cli-core
+azureml-sdk==1.0.74
\ No newline at end of file
diff --git a/setup.cfg b/interpretation/setup.cfg
similarity index 100%
rename from setup.cfg
rename to interpretation/setup.cfg
diff --git a/setup.py b/interpretation/setup.py
similarity index 60%
rename from setup.py
rename to
interpretation/setup.py
index 3d2cc7b3..38be7a20 100644
--- a/setup.py
+++ b/interpretation/setup.py
@@ -1,45 +1,38 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
 import setuptools
 
-with open("README.md", "r") as f:
+with open("../README.md", "r") as f:
     long_description = f.read()
 
+with open("requirements.txt") as f:
+    requirements = f.read().splitlines()
+
+
 setuptools.setup(
     author="DeepSeismic Maintainers",
     author_email="deepseismic@microsoft.com",
     classifiers=[
+        "Development Status :: 3 - Alpha",
         "Intended Audience :: Developers",
         "Intended Audience :: Science/Research",
         "License :: OSI Approved :: MIT License",
         "Operating System :: OS Independent",
-        "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.5",
         "Programming Language :: Python :: 3.6",
         "Programming Language :: Python :: 3.7",
         "Topic :: Scientific/Engineering",
         "Topic :: Software Development",
     ],
-    dependency_links=[
-        "https://github.com/opesci/devito/archive/v3.5.tar.gz#egg=devito-3.5"
-    ],
     description="DeepSeismic",
-    install_requires=[
-        "click==7.0",
-        "devito==3.5",
-        "h5py==2.9.0",
-        "numpy==1.17.0",
-        "scipy==1.3.0",
-        "sympy==1.4",
-    ],
+    install_requires=requirements,
     license="MIT",
     long_description=long_description,
     long_description_content_type="text/markdown",
-    name="deepseismic",
-    packages=setuptools.find_packages(
-        include=["deepseismic", "deepseismic.*"]
-    ),
+    name="deepseismic_interpretation",
+    packages=setuptools.find_packages(include=["deepseismic_interpretation", "deepseismic_interpretation.*"]),
     platforms="any",
-    python_requires=">= 3.5",
-    scripts=["bin/ds"],
+    python_requires=">=3.6",
     setup_requires=["pytest-runner"],
     tests_require=["pytest"],
     url="https://github.com/microsoft/deepseismic",
diff --git a/pyproject.toml b/pyproject.toml
index a8f43fef..446077b9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,2 +1,16 @@
 [tool.black]
-line-length = 79
+line-length = 120
+include = '\.pyi?$'
+exclude = '''
+/(
+    \.git
+  | \.hg
+  | \.mypy_cache
+  | \.tox
+  | \.venv
+  | _build
+  | buck-out
+  | build
+  | dist
+)/
+'''
diff --git a/scripts/autoformat.sh b/scripts/autoformat.sh
new file mode 100755
index 00000000..574a2a44
--- /dev/null
+++ b/scripts/autoformat.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+# autoformats all files in the repo to black
+
+# example of using regex -regex ".*\.\(py\|ipynb\|md\|txt\)"
+find . -type f -regex ".*\.py" -exec black {} +
diff --git a/scripts/data_symlink.sh b/scripts/data_symlink.sh
new file mode 100755
index 00000000..31fcc8ca
--- /dev/null
+++ b/scripts/data_symlink.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+
+# Azure VMs lose mounts after restart - this symlinks the data folder from the
+# user's home directory after VM restart
+
+user=$(whoami)
+sudo chown -R ${user} /mnt
+sudo chgrp -R ${user} /mnt
+ln -s ~/dutchf3 /mnt
diff --git a/scripts/download_penobscot.sh b/scripts/download_penobscot.sh
new file mode 100755
index 00000000..f9e7cb75
--- /dev/null
+++ b/scripts/download_penobscot.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+# commitHash:
+# url: https://zenodo.org/record/1341774
+#
+# Download the penobscot dataset and extract
+# Files description
+# File                    Format  Num Files  Total size (MB)
+# H1-H7                   XYZ     7          87.5
+# Seismic inlines         TIF     601        1,700
+# Seismic crosslines      TIF     481        1,700
+# Labeled inlines         PNG     601        4.9
+# Labeled crosslines      PNG     481        3.9
+# Seismic tiles (train)   PNG     75,810     116
+# Seismic labels (train)  JSON    2          1.5
+# Seismic tiles (test)    PNG     28,000     116
+# Seismic labels (test)   JSON    2          0.5
+# Args: directory to download and extract data to
+# Example: ./download_penobscot.sh /mnt/penobscot
+
+
+echo "Extracting to $1"
+cd "$1"
+# Download the files:
+wget https://zenodo.org/record/1341774/files/crosslines.zip
+wget https://zenodo.org/record/1341774/files/inlines.zip
+wget https://zenodo.org/record/1341774/files/horizons.zip
+wget https://zenodo.org/record/1341774/files/masks.zip
+wget https://zenodo.org/record/1341774/files/tiles_crosslines.zip
+wget https://zenodo.org/record/1341774/files/tiles_inlines.zip
+
+# Check that the md5 checksum matches to verify file integrity
+#
+# Expected output:
+# MD5(crosslines.zip)= 7bbe432052fe41c6009d9437fd0929b8
+# MD5(horizons.zip)= 42c104fafbb8e79695ae23527a91ee78
+# MD5(inlines.zip)= 0553676ef48879f590378cafc12d165d
+# MD5(masks.zip)= 12f142cb33af55c3b447401ebd81aba1
+# MD5(tiles_crosslines.zip)= 8dbd99da742ac9c6f9b63f8c6f925f6d
+# MD5(tiles_inlines.zip)= 955e2f9afb01878df2f71f0074736e42
+
+openssl dgst -md5 crosslines.zip
+openssl dgst -md5 horizons.zip
+openssl dgst -md5 inlines.zip
+openssl dgst -md5 masks.zip
+openssl dgst -md5 tiles_crosslines.zip
+openssl dgst -md5 tiles_inlines.zip
+
+# Unzip the data
+unzip crosslines.zip
+unzip inlines.zip
+unzip horizons.zip
+unzip masks.zip
+unzip tiles_crosslines.zip
+unzip tiles_inlines.zip
+
+echo Download complete.
\ No newline at end of file
diff --git a/scripts/env_reinstall.sh b/scripts/env_reinstall.sh
new file mode 100755
index 00000000..9b2f2935
--- /dev/null
+++ b/scripts/env_reinstall.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+conda env remove -n seismic-interpretation
+yes | conda env create -f environment/anaconda/local/environment.yml
+# don't use conda here as build VM's shell isn't set up when running as a build agent
+source activate seismic-interpretation
+pip install -e cv_lib
+pip install -e interpretation
+# temporary DS VM bugfix
+yes | conda install pytorch torchvision cudatoolkit=9.2 -c pytorch
diff --git a/scripts/kill_windows.sh b/scripts/kill_windows.sh
new file mode 100755
index 00000000..4a5caaa6
--- /dev/null
+++ b/scripts/kill_windows.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+# Script to kill multiple tmux windows
+
+tmux killw -t hrnet
+tmux killw -t hrnet_section_depth
+tmux killw -t hrnet_patch_depth
+
+tmux killw -t seresnet_unet
+tmux killw -t seresnet_unet_section_depth
+tmux killw -t seresnet_unet_patch_depth
\ No newline at end of file
diff --git a/scripts/parallel_training.sh b/scripts/parallel_training.sh
new file mode 100755
index 00000000..afc255ce
--- /dev/null
+++ b/scripts/parallel_training.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+# Script to run multiple models in parallel on multi-gpu machine + +workspace=../experiments/segmentation/penobscot/local +tmux neww -d -n hrnet +tmux neww -d -n hrnet_section_depth +tmux neww -d -n hrnet_patch_depth +tmux neww -d -n seresnet_unet +tmux neww -d -n seresnet_unet_section_depth +tmux neww -d -n seresnet_unet_patch_depth + +tmux send -t hrnet "source activate seismic-interpretation && cd ${workspace}" ENTER +tmux send -t hrnet "CUDA_VISIBLE_DEVICES=0 python train.py OUTPUT_DIR /data/output/hrnet --cfg 'configs/hrnet.yaml'" ENTER + +tmux send -t hrnet_patch_depth "source activate seismic-interpretation && cd ${workspace}" ENTER +tmux send -t hrnet_patch_depth "CUDA_VISIBLE_DEVICES=1 python train.py OUTPUT_DIR /data/output/hrnet_patch TRAIN.DEPTH patch --cfg 'configs/hrnet.yaml'" ENTER + +tmux send -t hrnet_section_depth "source activate seismic-interpretation && cd ${workspace}" ENTER +tmux send -t hrnet_section_depth "CUDA_VISIBLE_DEVICES=2 python train.py OUTPUT_DIR /data/output/hrnet_section TRAIN.DEPTH section --cfg 'configs/hrnet.yaml'" ENTER + +tmux send -t seresnet_unet "source activate seismic-interpretation && cd ${workspace}" ENTER +tmux send -t seresnet_unet "CUDA_VISIBLE_DEVICES=3 python train.py OUTPUT_DIR /data/output/seresnet --cfg 'configs/seresnet_unet.yaml'" ENTER + +tmux send -t seresnet_unet_patch_depth "source activate seismic-interpretation && cd ${workspace}" ENTER +tmux send -t seresnet_unet_patch_depth "CUDA_VISIBLE_DEVICES=4 python train.py OUTPUT_DIR /data/output/seresnet_patch TRAIN.DEPTH patch --cfg 'configs/seresnet_unet.yaml'" ENTER + +tmux send -t seresnet_unet_section_depth "source activate seismic-interpretation && cd ${workspace}" ENTER +tmux send -t seresnet_unet_section_depth "CUDA_VISIBLE_DEVICES=5 python train.py OUTPUT_DIR /data/output/seresnet_section TRAIN.DEPTH section --cfg 'configs/seresnet_unet.yaml'" ENTER \ No newline at end of file diff --git a/scripts/prepare_dutchf3.py b/scripts/prepare_dutchf3.py new file mode 100644 index 00000000..40d9f4e6 --- /dev/null +++ b/scripts/prepare_dutchf3.py @@ -0,0 +1,291 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# commitHash: c76bf579a0d5090ebd32426907d051d499f3e847
+# url: https://github.com/olivesgatech/facies_classification_benchmark
+"""Script to generate train and validation sets for Netherlands F3 dataset
+"""
+import itertools
+import logging
+import logging.config
+import math
+import warnings
+from os import path
+
+import fire
+import numpy as np
+from sklearn.model_selection import train_test_split
+
+
+def _get_splits_path(data_dir):
+    return path.join(data_dir, "splits")
+
+
+def _get_labels_path(data_dir):
+    return path.join(data_dir, "train", "train_labels.npy")
+
+
+def _write_split_files(splits_path, train_list, test_list, loader_type):
+    # use context managers so the files are closed even if a write fails
+    with open(path.join(splits_path, loader_type + "_train_val.txt"), "w") as f:
+        f.write("\n".join(train_list + test_list))
+    with open(path.join(splits_path, loader_type + "_train.txt"), "w") as f:
+        f.write("\n".join(train_list))
+    with open(path.join(splits_path, loader_type + "_val.txt"), "w") as f:
+        f.write("\n".join(test_list))
+
+
+def _get_aline_range(aline, per_val):
+    # hold out half of the validation fraction at each end of the aline range
+    test_aline = math.floor(aline * per_val / 2)
+    test_aline_range = itertools.chain(range(0, test_aline), range(aline - test_aline, aline))
+    train_aline_range = range(test_aline, aline - test_aline)
+
+    return train_aline_range, test_aline_range
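+
+
+# A worked example of _get_aline_range (illustrative numbers, added for clarity):
+# with aline=601 and per_val=0.2, test_aline = floor(601 * 0.2 / 2) = 60, so the
+# held-out validation ranges are alines [0, 60) and [541, 601) taken from both
+# ends of the volume, and training keeps the middle alines [60, 541).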
") + logger.info(f"Reading data from {data_dir}") + + labels_path = _get_labels_path(data_dir) + logger.info(f"Loading {labels_path}") + labels = np.load(labels_path) + logger.debug(f"Data shape [iline|xline|depth] {labels.shape}") + + iline, xline, depth = labels.shape + # Inline sections + train_iline_range, test_iline_range = _get_aline_range(iline, per_val) + + # Xline sections + train_xline_range, test_xline_range = _get_aline_range(xline, per_val) + + # Generate patches from sections + # Process inlines + horz_locations = range(0, xline - patch, stride) + vert_locations = range(0, depth - patch, stride) + logger.debug("Generating Inline patches") + logger.debug(horz_locations) + logger.debug(vert_locations) + + def _i_extract_patches(iline_range, horz_locations, vert_locations): + for i in iline_range: + locations = ([j, k] for j in horz_locations for k in vert_locations) + for j, k in locations: + yield "i_" + str(i) + "_" + str(j) + "_" + str(k) + + test_i_list = list(_i_extract_patches(test_iline_range, horz_locations, vert_locations)) + train_i_list = list(_i_extract_patches(train_iline_range, horz_locations, vert_locations)) + + # Process crosslines + horz_locations = range(0, iline - patch, stride) + vert_locations = range(0, depth - patch, stride) + + def _x_extract_patches(xline_range, horz_locations, vert_locations): + for j in xline_range: + locations = ([i, k] for i in horz_locations for k in vert_locations) + for i, k in locations: + yield "x_" + str(i) + "_" + str(j) + "_" + str(k) + + test_x_list = list(_x_extract_patches(test_xline_range, horz_locations, vert_locations)) + train_x_list = list(_x_extract_patches(train_xline_range, horz_locations, vert_locations)) + + train_list = train_x_list + train_i_list + test_list = test_x_list + test_i_list + + # write to files to disk: + # NOTE: This isn't quite right we should calculate the patches again for the whole volume + splits_path = _get_splits_path(data_dir) + _write_split_files(splits_path, train_list, test_list, "patch") + + +_LOADER_TYPES = {"section": split_section_train_val, "patch": split_patch_train_val} + + +def get_split_function(loader_type): + return _LOADER_TYPES.get(loader_type, split_patch_train_val) + + +def run_split_func(loader_type, *args, **kwargs): + split_func = get_split_function(loader_type) + split_func(*args, **kwargs) + + +def split_alaudah_et_al_19(data_dir, stride, fraction_validation=0.2, loader_type="patch", log_config=None): + """Generate train and validation files (with overlap) for Netherlands F3 dataset. + The original split method from https://github.com/olivesgatech/facies_classification_benchmark + DON'T USE, SEE NOTES BELOW + + Args: + data_dir (str): data directory path + stride (int): stride to use when sectioning of the volume + fraction_validation (float, optional): the fraction of the volume to use for validation. + Defaults to 0.2. + loader_type (str, optional): type of data loader, can be "patch" or "section". + Defaults to "patch". + log_config (str, optional): path to log config. Defaults to None. + + Notes: + Only kept for reproducibility. It generates overlapping train and val which makes + validation results unreliable. + """ + + if log_config is not None: + logging.config.fileConfig(log_config) + + warnings.warn("THIS CREATES OVERLAPPING TRAINING AND VALIDATION SETS") + + assert loader_type in [ + "section", + "patch", + ], f"Loader type {loader_type} is not valid. 
+
+
+def split_alaudah_et_al_19(data_dir, stride, fraction_validation=0.2, loader_type="patch", log_config=None):
+    """Generate train and validation files (with overlap) for Netherlands F3 dataset.
+    The original split method from https://github.com/olivesgatech/facies_classification_benchmark
+    DON'T USE, SEE NOTES BELOW
+
+    Args:
+        data_dir (str): data directory path
+        stride (int): stride to use when sectioning the volume
+        fraction_validation (float, optional): the fraction of the volume to use for validation.
+            Defaults to 0.2.
+        loader_type (str, optional): type of data loader, can be "patch" or "section".
+            Defaults to "patch".
+        log_config (str, optional): path to log config. Defaults to None.
+
+    Notes:
+        Only kept for reproducibility. It generates overlapping train and val which makes
+        validation results unreliable.
+    """
+
+    if log_config is not None:
+        logging.config.fileConfig(log_config)
+
+    warnings.warn("THIS CREATES OVERLAPPING TRAINING AND VALIDATION SETS")
+
+    assert loader_type in [
+        "section",
+        "patch",
+    ], f"Loader type {loader_type} is not valid. \
+        Please specify either 'section' or 'patch' for loader_type"
+
+    # create inline and crossline patches for training and validation:
+    logger = logging.getLogger(__name__)
+
+    logger.info(f"Reading data from {data_dir}")
+
+    labels_path = _get_labels_path(data_dir)
+    logger.info(f"Loading {labels_path}")
+    labels = np.load(labels_path)
+    iline, xline, depth = labels.shape
+    logger.debug(f"Data shape [iline|xline|depth] {labels.shape}")
+
+    if loader_type == "section":
+        i_list = ["i_" + str(i) for i in range(iline)]
+        x_list = ["x_" + str(x) for x in range(xline)]
+    elif loader_type == "patch":
+        i_list = []
+        horz_locations = range(0, xline - stride, stride)
+        vert_locations = range(0, depth - stride, stride)
+        logger.debug("Generating Inline patches")
+        logger.debug(horz_locations)
+        logger.debug(vert_locations)
+        for i in range(iline):
+            # for every inline:
+            # images are referenced by top-left corner:
+            locations = [[j, k] for j in horz_locations for k in vert_locations]
+            patches_list = ["i_" + str(i) + "_" + str(j) + "_" + str(k) for j, k in locations]
+            i_list.append(patches_list)
+
+        # flatten the list
+        i_list = list(itertools.chain(*i_list))
+
+        x_list = []
+        horz_locations = range(0, iline - stride, stride)
+        vert_locations = range(0, depth - stride, stride)
+        for j in range(xline):
+            # for every xline:
+            # images are referenced by top-left corner:
+            locations = [[i, k] for i in horz_locations for k in vert_locations]
+            patches_list = ["x_" + str(i) + "_" + str(j) + "_" + str(k) for i, k in locations]
+            x_list.append(patches_list)
+
+        # flatten the list
+        x_list = list(itertools.chain(*x_list))
+
+    list_train_val = i_list + x_list
+
+    # create train and test splits:
+    train_list, test_list = train_test_split(list_train_val, test_size=fraction_validation, shuffle=True)
+
+    # write split files to disk:
+    splits_path = _get_splits_path(data_dir)
+    _write_split_files(splits_path, train_list, test_list, loader_type)
+
+
+# TODO: Try https://github.com/Chilipp/docrep for docstring reuse
+class SplitTrainValCLI(object):
+    def section(self, data_dir, per_val=0.2, log_config=None):
+        """Generate section based train and validation files for Netherlands F3 dataset.
+
+        Args:
+            data_dir (str): data directory path
+            per_val (float, optional): the fraction of the volume to use for validation.
+                Defaults to 0.2.
+            log_config (str): path to log configurations
+        """
+        return split_section_train_val(data_dir, per_val=per_val, log_config=log_config)
+
+    def patch(self, data_dir, stride, patch, per_val=0.2, log_config=None):
+        """Generate patch based train and validation files for Netherlands F3 dataset.
+
+        Args:
+            data_dir (str): data directory path
+            stride (int): stride to use when sectioning the volume
+            patch (int): size of patch to extract
+            per_val (float, optional): the fraction of the volume to use for validation.
+                Defaults to 0.2.
+            log_config (str): path to log configurations
+        """
+        return split_patch_train_val(data_dir, stride, patch, per_val=per_val, log_config=log_config)
+
+
+if __name__ == "__main__":
+    """Example:
+    python prepare_dutchf3.py split_train_val section --data-dir=/mnt/dutch
+    or
+    python prepare_dutchf3.py split_train_val patch --data-dir=/mnt/dutch --stride=50 --patch=100
+
+    """
+    fire.Fire(
+        {"split_train_val": SplitTrainValCLI, "split_alaudah_et_al_19": split_alaudah_et_al_19,}
+    )
diff --git a/scripts/prepare_penobscot.py b/scripts/prepare_penobscot.py
new file mode 100644
index 00000000..754993be
--- /dev/null
+++ b/scripts/prepare_penobscot.py
@@ -0,0 +1,87 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+# commitHash: c76bf579a0d5090ebd32426907d051d499f3e847
+# url: https://github.com/olivesgatech/facies_classification_benchmark
+"""Script to split the Penobscot inline data into train, validation and test sets
+"""
+import glob
+import itertools
+import logging
+import logging.config
+import math
+import os
+import shutil
+
+import fire
+from toolz import partition_all
+
+
+def _create_directory(dir_path, overwrite=False):
+    logger = logging.getLogger(__name__)
+    # only remove the directory if it actually exists
+    if overwrite and os.path.exists(dir_path):
+        logger.info(f"Set to overwrite. Removing {dir_path}")
+        shutil.rmtree(dir_path)
+
+    try:
+        logger.info(f"Creating {dir_path}")
+        os.mkdir(dir_path)
+        return dir_path
+    except FileExistsError as e:
+        logger.warning(
+            f"Can't write to {dir_path} as it already exists. "
+            "Please specify overwrite=true or delete the folder"
+        )
+        raise e
+
+
+def _copy_files(files_iter, new_dir):
+    logger = logging.getLogger(__name__)
+    for f in files_iter:
+        logger.debug(f"Copying {f} to {new_dir}")
+        shutil.copy(f, new_dir)
+
+
+def _split_train_val_test(partition, val_ratio, test_ratio):
+    total_samples = len(partition)
+    val_samples = math.floor(val_ratio * total_samples)
+    test_samples = math.floor(test_ratio * total_samples)
+    train_samples = total_samples - (val_samples + test_samples)
+    train_list = partition[:train_samples]
+    val_list = partition[train_samples : train_samples + val_samples]
+    test_list = partition[train_samples + val_samples : train_samples + val_samples + test_samples]
+    return train_list, val_list, test_list
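+
+# Worked example of the split arithmetic above (illustrative numbers): a partition
+# of 121 files with val_ratio=0.1 and test_ratio=0.2 yields floor(0.1 * 121) = 12
+# validation files, floor(0.2 * 121) = 24 test files, and the remaining
+# 121 - (12 + 24) = 85 files for training.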
+
+
+def split_inline(data_dir, val_ratio, test_ratio, overwrite=False, exclude_files=None):
+    """Splits the inline data into train, val and test.
+
+    Args:
+        data_dir (str): path to directory that holds the data
+        val_ratio (float): the ratio of each partition to use for validation
+        test_ratio (float): the ratio of each partition to use for testing
+        overwrite (bool, optional): whether to remove existing output directories first.
+            Defaults to False.
+        exclude_files (list[str]): filenames to exclude from the dataset, such as ones
+            that contain artifacts. Example: ['image1.tiff']
+    """
+    num_partitions = 5
+    image_dir = os.path.join(data_dir, "inlines")
+    dir_paths = (os.path.join(image_dir, ddir) for ddir in ("train", "val", "test"))
+    locations_list = [_create_directory(d, overwrite=overwrite) for d in dir_paths]  # train, val, test
+
+    images_iter = glob.iglob(os.path.join(image_dir, "*.tiff"))
+
+    if exclude_files is not None:
+        images_list = list(itertools.filterfalse(lambda x: x in exclude_files, images_iter))
+    else:
+        images_list = list(images_iter)
+
+    num_elements = math.ceil(len(images_list) / num_partitions)
+    for partition in partition_all(num_elements, images_list):  # Partition files into N partitions
+        for files_list, dest_dir in zip(_split_train_val_test(partition, val_ratio, test_ratio), locations_list):
+            _copy_files(files_list, dest_dir)
+
+
+if __name__ == "__main__":
+    """Example:
+    python prepare_penobscot.py split_inline --data-dir=/mnt/penobscot --val-ratio=.1 --test-ratio=.2
+
+    """
+    fire.Fire({"split_inline": split_inline})
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 00000000..3c873030
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,11 @@
+# Tests
+
+This project uses unit and integration tests with Python files and notebooks:
+
+ * In the unit tests we make sure our metrics are sane.
+ * In the integration tests we check that our models run their training and scoring scripts and that our notebooks all run to completion.
+
+## CI/CD
+
+You can find the build configuration files in the `cicd` folder.
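+
+To run the unit tests locally, a minimal sketch (assuming the `seismic-interpretation`
+conda environment created by `scripts/env_reinstall.sh`, run from the repository root):
+
+```bash
+source activate seismic-interpretation
+pytest --durations=0 cv_lib/tests/
+```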
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/cicd/component_governance.yml b/tests/cicd/component_governance.yml
new file mode 100644
index 00000000..cae6b7a9
--- /dev/null
+++ b/tests/cicd/component_governance.yml
@@ -0,0 +1,26 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+
+# Starter pipeline for legal clearance
+# Start with a minimal pipeline that you can customize to build and deploy your code.
+# Add steps that build, run tests, deploy, and more:
+# https://aka.ms/yaml
+
+# Pull request against these branches will trigger this build
+pr:
+- master
+- staging
+
+trigger:
+- master
+- staging
+
+pool:
+  vmImage: 'ubuntu-latest'
+
+steps:
+- task: ComponentGovernanceComponentDetection@0
+  inputs:
+    scanType: 'Register'
+    verbosity: 'Verbose'
+    alertWarningLevel: 'High'
diff --git a/tests/cicd/main_build.yml b/tests/cicd/main_build.yml
new file mode 100644
index 00000000..017137b4
--- /dev/null
+++ b/tests/cicd/main_build.yml
@@ -0,0 +1,270 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+
+# Pull request against these branches will trigger this build
+pr:
+- master
+- staging
+
+# Any commit to this branch will trigger the build.
+trigger:
+- master
+- staging
+
+jobs:
+# partially disable setup for now - done manually on build VM
+- job: setup
+  timeoutInMinutes: 10
+  displayName: Setup
+  pool:
+    name: deepseismicagentpool
+  steps:
+  - bash: |
+      echo "Running setup..."
+      pwd
+      ls
+      git branch
+      uname -ra
+
+      ./scripts/env_reinstall.sh
+
+      # copy your model files like so - using dummy file to illustrate
+      azcopy --quiet --source:https://$(storagename).blob.core.windows.net/models/model --source-key $(storagekey) --destination /home/alfred/models/your_model_name
+
+- job: unit_tests_job
+  dependsOn: setup
+  timeoutInMinutes: 5
+  displayName: Unit Tests Job
+  pool:
+    name: deepseismicagentpool
+  steps:
+  - bash: |
+      echo "Starting unit tests"
+      source activate seismic-interpretation
+      pytest --durations=0 cv_lib/tests/
+      echo "Unit test job passed"
+
+
+###################################################################################################
+# LOCAL PATCH JOBS
+###################################################################################################
+
+- job: hrnet_penobscot
+  dependsOn: setup
+  timeoutInMinutes: 5
+  displayName: hrnet penobscot
+  pool:
+    name: deepseismicagentpool
+  steps:
+  - bash: |
+      conda env list
+      source activate seismic-interpretation
+      # run the tests
+      cd experiments/interpretation/penobscot/local
+      python train.py 'DATASET.ROOT' '/home/alfred/data/penobscot' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/hrnet.yaml --debug
+      # find the latest model which we just trained
+      model=$(ls -td */seg_hrnet/*/* | head -1)
+      echo ${model}
+      # try running the test script
+      python test.py 'DATASET.ROOT' '/home/alfred/data/penobscot' 'TEST.MODEL_PATH' ${model}/seg_hrnet_running_model_1.pth --cfg=configs/hrnet.yaml --debug
+
+
+- job: seresnet_unet_penobscot
+  dependsOn: setup
+  timeoutInMinutes: 5
+  displayName: seresnet_unet penobscot
+  pool:
+    name: deepseismicagentpool
+  steps:
+  - bash: |
+      conda env list
+      source activate seismic-interpretation
+      # run the tests
+      cd experiments/interpretation/penobscot/local
+      python train.py 'DATASET.ROOT' '/home/alfred/data/penobscot' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/seresnet_unet.yaml --debug
+      # find the latest model which we just trained
+      model=$(ls -td */resnet_unet/*/* | head -1)
+      echo ${model}
+      # try running the test script
+      python test.py 'DATASET.ROOT' '/home/alfred/data/penobscot' 'TEST.MODEL_PATH' ${model}/resnet_unet_running_model_1.pth --cfg=configs/seresnet_unet.yaml --debug
+
+- job: hrnet_dutchf3
+  dependsOn: setup
+  timeoutInMinutes: 5
+  displayName: hrnet dutchf3
+  pool:
+    name: deepseismicagentpool
+  steps:
+  - bash: |
+      source activate seismic-interpretation
+      # run the tests
+      cd experiments/interpretation/dutchf3_patch/local
+      python train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/hrnet.yaml --debug
+      # find the latest model which we just trained
+      model=$(ls -td */seg_hrnet/*/* | head -1)
+      echo ${model}
+      # try running the test script
+      python test.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TEST.MODEL_PATH' ${model}/seg_hrnet_running_model_1.pth --cfg=configs/hrnet.yaml --debug
+
+
+- job: unet_dutchf3
+  dependsOn: setup
+  timeoutInMinutes: 5
+  displayName: unet dutchf3
+  pool:
+    name: deepseismicagentpool
+  steps:
+  - bash: |
+      source activate seismic-interpretation
+      # run the tests
+      cd experiments/interpretation/dutchf3_patch/local
+      python train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/unet.yaml --debug
+      # find the latest model which we just trained
+      model=$(ls -td */resnet_unet/*/* | head -1)
+      echo ${model}
+      # try running the test script
+      python test.py 'DATASET.ROOT' 
'/home/alfred/data/dutch_f3/data' 'TEST.MODEL_PATH' ${model}/resnet_unet_running_model_1.pth --cfg=configs/unet.yaml --debug + +- job: seresnet_unet_dutchf3 + dependsOn: setup + timeoutInMinutes: 5 + displayName: seresnet unet dutchf3 + pool: + name: deepseismicagentpool + steps: + - bash: | + source activate seismic-interpretation + # run the tests + cd experiments/interpretation/dutchf3_patch/local + python train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/seresnet_unet.yaml --debug + # find the latest model which we just trained + model=$(ls -td */resnet_unet/*/* | head -1) + # try running the test script + python test.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TEST.MODEL_PATH' ${model}/resnet_unet_running_model_1.pth --cfg=configs/seresnet_unet.yaml --debug + +- job: patch_deconvnet_dutchf3 + dependsOn: setup + timeoutInMinutes: 5 + displayName: patch deconvnet dutchf3 + pool: + name: deepseismicagentpool + steps: + - bash: | + source activate seismic-interpretation + # run the tests + cd experiments/interpretation/dutchf3_patch/local + python train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.BATCH_SIZE_PER_GPU' 1 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/patch_deconvnet.yaml --debug + # find the latest model which we just trained + model=$(ls -td */patch_deconvnet/*/* | head -1) + # try running the test script + python test.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'VALIDATION.BATCH_SIZE_PER_GPU' 1 'TEST.MODEL_PATH' ${model}/patch_deconvnet_running_model_1.pth --cfg=configs/patch_deconvnet.yaml --debug + +- job: patch_deconvnet_skip_dutchf3 + dependsOn: setup + timeoutInMinutes: 5 + displayName: patch deconvnet skip dutchf3 + pool: + name: deepseismicagentpool + steps: + - bash: | + source activate seismic-interpretation + # run the tests + cd experiments/interpretation/dutchf3_patch/local + python train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.BATCH_SIZE_PER_GPU' 1 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/patch_deconvnet_skip.yaml --debug + # find the latest model which we just trained + model=$(ls -td */patch_deconvnet_skip/*/* | head -1) + # try running the test script + python test.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'VALIDATION.BATCH_SIZE_PER_GPU' 1 'TEST.MODEL_PATH' ${model}/patch_deconvnet_skip_running_model_1.pth --cfg=configs/patch_deconvnet_skip.yaml --debug + + +################################################################################################### +# DISTRIBUTED PATCH JOBS +################################################################################################### + +- job: hrnet_dutchf3_dist + dependsOn: setup + timeoutInMinutes: 5 + displayName: hrnet dutchf3 distributed + pool: + name: deepseismicagentpool + steps: + - bash: | + source activate seismic-interpretation + # run the tests + cd experiments/interpretation/dutchf3_patch/distributed + python -m torch.distributed.launch --nproc_per_node=$(nproc) train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/hrnet.yaml --debug + +- job: patch_deconvnet_skip_dist + dependsOn: setup + timeoutInMinutes: 5 + displayName: patch deconvnet skip distributed + pool: + name: deepseismicagentpool + steps: + - bash: | + source activate seismic-interpretation + # run the tests + cd experiments/interpretation/dutchf3_patch/distributed + python -m torch.distributed.launch --nproc_per_node=$(nproc) train.py 
'TRAIN.BATCH_SIZE_PER_GPU' 1 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/patch_deconvnet_skip.yaml --debug + +- job: patch_deconvnet_dist + dependsOn: setup + timeoutInMinutes: 5 + displayName: patch deconvnet distributed + pool: + name: deepseismicagentpool + steps: + - bash: | + source activate seismic-interpretation + # run the tests + cd experiments/interpretation/dutchf3_patch/distributed + python -m torch.distributed.launch --nproc_per_node=$(nproc) train.py 'TRAIN.BATCH_SIZE_PER_GPU' 1 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/patch_deconvnet.yaml --debug + +- job: seresnet_unet_dutchf3_dist + dependsOn: setup + timeoutInMinutes: 5 + displayName: seresnet unet dutchf3 distributed + pool: + name: deepseismicagentpool + steps: + - bash: | + source activate seismic-interpretation + # run the tests + cd experiments/interpretation/dutchf3_patch/distributed + python -m torch.distributed.launch --nproc_per_node=$(nproc) train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/seresnet_unet.yaml --debug + +- job: unet_dutchf3_dist + dependsOn: setup + timeoutInMinutes: 5 + displayName: unet dutchf3 distributed + pool: + name: deepseismicagentpool + steps: + - bash: | + source activate seismic-interpretation + # run the tests + cd experiments/interpretation/dutchf3_patch/distributed + python -m torch.distributed.launch --nproc_per_node=$(nproc) train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/unet.yaml --debug + +################################################################################################### +# LOCAL SECTION JOBS +################################################################################################### + +- job: section_deconvnet_skip + dependsOn: setup + timeoutInMinutes: 5 + displayName: section deconvnet skip + pool: + name: deepseismicagentpool + steps: + - bash: | + source activate seismic-interpretation + # run the tests + cd experiments/interpretation/dutchf3_section/local + python train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/section_deconvnet_skip.yaml --debug + # find the latest model which we just trained + model=$(ls -td */section_deconvnet_skip/*/* | head -1) + echo ${model} + # try running the test script + python test.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TEST.MODEL_PATH' ${model}/section_deconvnet_skip_running_model_1.pth --cfg=configs/section_deconvnet_skip.yaml --debug + diff --git a/tests/cicd/notebooks_build.yml b/tests/cicd/notebooks_build.yml new file mode 100644 index 00000000..b1cb1d00 --- /dev/null +++ b/tests/cicd/notebooks_build.yml @@ -0,0 +1,56 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# Pull request against these branches will trigger this build +pr: +- master +- staging + +# Any commit to this branch will trigger the build. +trigger: +- master +- staging + +jobs: + +# partially disable setup for now - done manually on build VM +- job: setup + timeoutInMinutes: 10 + displayName: Setup + pool: + name: deepseismicagentpool + + steps: + - bash: | + echo "Running setup..." 
+ pwd + ls + git branch + uname -ra + + ./scripts/env_reinstall.sh + + # copy your model files like so - using dummy file to illustrate + azcopy --quiet --source:https://$(storagename).blob.core.windows.net/models/model --source-key $(storagekey) --destination /home/alfred/models/your_model_name + +- job: HRNET_demo + dependsOn: setup + timeoutInMinutes: 5 + displayName: HRNET demo + pool: + name: deepseismicagentpool + steps: + - bash: | + source activate seismic-interpretation + pytest -s tests/cicd/src/notebook_integration_tests.py --nbname examples/interpretation/notebooks/HRNet_Penobscot_demo_notebook.ipynb --dataset_root /home/alfred/data/penobscot + +- job: F3_block_training_and_evaluation_local + dependsOn: setup + timeoutInMinutes: 5 + displayName: F3 block training and evaluation local + pool: + name: deepseismicagentpool + steps: + - bash: | + source activate seismic-interpretation + pytest -s tests/cicd/src/notebook_integration_tests.py --nbname examples/interpretation/notebooks/F3_block_training_and_evaluation_local.ipynb --dataset_root /home/alfred/data/dutch_f3/data diff --git a/tests/cicd/penobscot.yml b/tests/cicd/penobscot.yml new file mode 100644 index 00000000..3d7ae4e3 --- /dev/null +++ b/tests/cicd/penobscot.yml @@ -0,0 +1,179 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# Pull request against these branches will trigger this build +pr: +- master +- staging + +# Any commit to this branch will trigger the build. +trigger: +- master +- staging + +jobs: + +# ################################################################################################### +# # LOCAL PATCH JOBS +# ################################################################################################### + +# - job: hrnet_penobscot +# timeoutInMinutes: 5 +# displayName: hrnet penobscot +# pool: +# name: deepseismicagentpool +# steps: +# - bash: | +# conda env list +# source activate seismic-interpretation +# # run the tests +# cd experiments/interpretation/penobscot/local +# python train.py 'DATASET.ROOT' '/home/alfred/data/penobscot' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/hrnet.yaml --debug +# # find the latest model which we just trained +# model=$(ls -td */seg_hrnet/*/* | head -1) +# echo ${model} +# # # try running the test script +# python test.py 'DATASET.ROOT' '/home/alfred/data/penobscot' 'TEST.MODEL_PATH' ${model}/seg_hrnet_running_model_1.pth --cfg=configs/hrnet.yaml --debug + +# - job: seresnet_unet_penobscot +# timeoutInMinutes: 5 +# displayName: seresnet_unet penobscot +# pool: +# name: deepseismicagentpool +# steps: +# - bash: | +# conda env list +# source activate seismic-interpretation +# # run the tests +# cd experiments/interpretation/penobscot/local +# python train.py 'DATASET.ROOT' '/home/alfred/data/penobscot' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/seresnet_unet.yaml --debug +# # find the latest model which we just trained +# model=$(ls -td */resnet_unet/*/* | head -1) +# echo ${model} +# # try running the test script +# python test.py 'DATASET.ROOT' '/home/alfred/data/penobscot' 'TEST.MODEL_PATH' ${model}/resnet_unet_running_model_1.pth --cfg=configs/seresnet_unet.yaml --debug + +# - job: hrnet_dutchf3 +# timeoutInMinutes: 5 +# displayName: hrnet dutchf3 +# pool: +# name: deepseismicagentpool +# steps: +# - bash: | +# source activate seismic-interpretation +# # run the tests +# cd experiments/interpretation/dutchf3_patch/local +# python train.py 'DATASET.ROOT' 
'/home/alfred/data/dutch_f3/data' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/hrnet.yaml --debug +# # find the latest model which we just trained +# model=$(ls -td */seg_hrnet/*/* | head -1) +# echo ${model} +# # try running the test script +# python test.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TEST.MODEL_PATH' ${model}/seg_hrnet_running_model_1.pth --cfg=configs/hrnet.yaml --debug + +# - job: unet_dutchf3 +# timeoutInMinutes: 5 +# displayName: unet dutchf3 +# pool: +# name: deepseismicagentpool +# steps: +# - bash: | +# source activate seismic-interpretation +# # run the tests +# cd experiments/interpretation/dutchf3_patch/local +# python train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/unet.yaml --debug +# # find the latest model which we just trained +# model=$(ls -td */resnet_unet/*/* | head -1) +# echo ${model} +# # try running the test script +# python test.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TEST.MODEL_PATH' ${model}/resnet_unet_running_model_1.pth --cfg=configs/unet.yaml --debug + +# - job: seresnet_unet_dutchf3 +# timeoutInMinutes: 5 +# displayName: seresnet unet dutchf3 +# pool: +# name: deepseismicagentpool +# steps: +# - bash: | +# source activate seismic-interpretation +# # run the tests +# cd experiments/interpretation/dutchf3_patch/local +# python train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/seresnet_unet.yaml --debug +# # find the latest model which we just trained +# model=$(ls -td */resnet_unet/*/* | head -1) +# # try running the test script +# python test.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TEST.MODEL_PATH' ${model}/resnet_unet_running_model_1.pth --cfg=configs/seresnet_unet.yaml --debug + +# - job: patch_deconvnet_dutchf3 +# timeoutInMinutes: 5 +# displayName: patch deconvnet dutchf3 +# pool: +# name: deepseismicagentpool +# steps: +# - bash: | +# source activate seismic-interpretation +# # run the tests +# cd experiments/interpretation/dutchf3_patch/local +# python train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.BATCH_SIZE_PER_GPU' 1 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/patch_deconvnet.yaml --debug +# # find the latest model which we just trained +# model=$(ls -td */patch_deconvnet/*/* | head -1) +# # try running the test script +# python test.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'VALIDATION.BATCH_SIZE_PER_GPU' 1 'TEST.MODEL_PATH' ${model}/patch_deconvnet_running_model_1.pth --cfg=configs/patch_deconvnet.yaml --debug + +# - job: patch_deconvnet_skip_dutchf3 +# timeoutInMinutes: 5 +# displayName: patch deconvnet skip dutchf3 +# pool: +# name: deepseismicagentpool +# steps: +# - bash: | +# source activate seismic-interpretation +# # run the tests +# cd experiments/interpretation/dutchf3_patch/local +# python train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.BATCH_SIZE_PER_GPU' 1 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/patch_deconvnet_skip.yaml --debug +# # find the latest model which we just trained +# model=$(ls -td */patch_deconvnet_skip/*/* | head -1) +# # try running the test script +# python test.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'VALIDATION.BATCH_SIZE_PER_GPU' 1 'TEST.MODEL_PATH' ${model}/patch_deconvnet_skip_running_model_1.pth --cfg=configs/patch_deconvnet_skip.yaml --debug + + +# ################################################################################################### +# # 
DISTRIBUTED PATCH JOBS +# ################################################################################################### + +# - job: hrnet_dutchf3_dist +# timeoutInMinutes: 5 +# displayName: hrnet dutchf3 distributed +# pool: +# name: deepseismicagentpool +# steps: +# - bash: | +# source activate seismic-interpretation +# # run the tests +# cd experiments/interpretation/dutchf3_patch/distributed +# python -m torch.distributed.launch --nproc_per_node=$(nproc) train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/hrnet.yaml --debug + + +- job: unet_dutchf3_dist + timeoutInMinutes: 5 + displayName: unet dutchf3 distributed + pool: + name: deepseismicagentpool + steps: + - bash: | + source activate seismic-interpretation + # run the tests + cd experiments/interpretation/dutchf3_patch/distributed + python -m torch.distributed.launch --nproc_per_node=$(nproc) train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/unet.yaml --debug + +- job: seresnet_unet_dutchf3_dist + timeoutInMinutes: 5 + displayName: seresnet unet dutchf3 distributed + pool: + name: deepseismicagentpool + steps: + - bash: | + source activate seismic-interpretation + # run the tests + cd experiments/interpretation/dutchf3_patch/distributed + python -m torch.distributed.launch --nproc_per_node=$(nproc) train.py 'DATASET.ROOT' '/home/alfred/data/dutch_f3/data' 'TRAIN.END_EPOCH' 1 'TRAIN.SNAPSHOTS' 1 --cfg=configs/seresnet_unet.yaml --debug + \ No newline at end of file diff --git a/tests/cicd/src/conftest.py b/tests/cicd/src/conftest.py new file mode 100644 index 00000000..c222d3b9 --- /dev/null +++ b/tests/cicd/src/conftest.py @@ -0,0 +1,32 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import pytest + + +def pytest_addoption(parser): + parser.addoption("--nbname", action="store", type=str, default=None) + parser.addoption("--dataset_root", action="store", type=str, default=None) + + +@pytest.fixture +def nbname(request): + return request.config.getoption("--nbname") + + +@pytest.fixture +def dataset_root(request): + return request.config.getoption("--dataset_root") + + +""" +def pytest_generate_tests(metafunc): + # This is called for every test. Only get/set command line arguments + # if the argument is specified in the list of test "fixturenames". + option_value = metafunc.config.option.nbname + if 'nbname' in metafunc.fixturenames and option_value is not None: + metafunc.parametrize("nbname", [option_value]) + option_value = metafunc.config.option.dataset_root + if 'dataset_root' in metafunc.fixturenames and option_value is not None: + metafunc.parametrize("dataset_root", [option_value]) +""" diff --git a/tests/cicd/src/notebook_integration_tests.py b/tests/cicd/src/notebook_integration_tests.py new file mode 100644 index 00000000..1c0ccbf8 --- /dev/null +++ b/tests/cicd/src/notebook_integration_tests.py @@ -0,0 +1,19 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
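+#
+# Note (added commentary): papermill injects the `parameters` dict passed to
+# pm.execute_notebook below into the notebook under test; by papermill convention
+# this assumes each notebook contains a cell tagged "parameters".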
+
+import pytest
+import papermill as pm
+
+from tests.notebooks_common import OUTPUT_NOTEBOOK, KERNEL_NAME
+
+# don't add any pytest markers here as this just runs whatever notebook name is supplied
+# @pytest.mark.integration
+# @pytest.mark.notebooks
+def test_notebook_run(nbname, dataset_root):
+    pm.execute_notebook(
+        nbname,
+        OUTPUT_NOTEBOOK,
+        kernel_name=KERNEL_NAME,
+        parameters={"max_iterations": 3, "max_epochs": 1, "max_snapshots": 1, "dataset_root": dataset_root},
+        cwd="examples/interpretation/notebooks",
+    )
diff --git a/tests/notebooks_common.py b/tests/notebooks_common.py
new file mode 100644
index 00000000..e9f632dd
--- /dev/null
+++ b/tests/notebooks_common.py
@@ -0,0 +1,9 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+
+import os
+
+# Unless manually modified, python3 should be the name of the current jupyter kernel
+# that runs on the activated conda environment
+KERNEL_NAME = "python3"
+OUTPUT_NOTEBOOK = "output.ipynb"
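+
+# To verify which kernels are available on a given machine (standard Jupyter
+# command, shown for convenience):
+#   jupyter kernelspec list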